// This code is automatically generated. DO NOT MODIFY.
//
// Instead, modify `crates/stdarch-gen/neon.spec` and run the following command to re-generate this file:
//
// ```
// OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen -- crates/stdarch-gen/neon.spec
// ```
use super::*;
#[cfg(test)]
use stdarch_test::assert_instr;
/// Vector bitwise and
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
simd_and(a, b)
}
/// Vector bitwise and
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
simd_and(a, b)
}
/// Vector bitwise and
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
simd_and(a, b)
}
/// Vector bitwise and
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
simd_and(a, b)
}
/// Vector bitwise and
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
simd_and(a, b)
}
/// Vector bitwise and
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
simd_and(a, b)
}
/// Vector bitwise and
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
simd_and(a, b)
}
/// Vector bitwise and
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
simd_and(a, b)
}
/// Vector bitwise and
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
simd_and(a, b)
}
/// Vector bitwise and
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
simd_and(a, b)
}
/// Vector bitwise and
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
simd_and(a, b)
}
/// Vector bitwise and
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
simd_and(a, b)
}
/// Vector bitwise and
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
simd_and(a, b)
}
/// Vector bitwise and
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
simd_and(a, b)
}
/// Vector bitwise and
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
simd_and(a, b)
}
/// Vector bitwise and
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
simd_and(a, b)
}
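// Hand-written illustrative sketch, not emitted by stdarch-gen: shows `vand_s8`
// masking each lane of `b` with the constant 0b0110. The `i8x8` helper and
// `transmute` are the same test-support items the generated bodies use.
#[cfg(test)]
#[allow(unused)]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
unsafe fn vand_s8_sketch() {
    let mask: int8x8_t = transmute(i8x8::new(6, 6, 6, 6, 6, 6, 6, 6));
    let b: int8x8_t = transmute(i8x8::new(1, 2, 3, 4, 5, 6, 7, 8));
    // Only bits 1 and 2 of each lane survive the AND.
    let r: i8x8 = transmute(vand_s8(mask, b));
    assert_eq!(r, i8x8::new(0, 2, 2, 4, 4, 6, 6, 0));
}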
/// Vector bitwise inclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
simd_or(a, b)
}
/// Vector bitwise inclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
simd_or(a, b)
}
/// Vector bitwise inclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
simd_or(a, b)
}
/// Vector bitwise inclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
simd_or(a, b)
}
/// Vector bitwise inclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
simd_or(a, b)
}
/// Vector bitwise inclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
simd_or(a, b)
}
/// Vector bitwise inclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
simd_or(a, b)
}
/// Vector bitwise inclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
simd_or(a, b)
}
/// Vector bitwise inclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
simd_or(a, b)
}
/// Vector bitwise inclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
simd_or(a, b)
}
/// Vector bitwise inclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
simd_or(a, b)
}
/// Vector bitwise inclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
simd_or(a, b)
}
/// Vector bitwise inclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
simd_or(a, b)
}
/// Vector bitwise inclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
simd_or(a, b)
}
/// Vector bitwise inclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
simd_or(a, b)
}
/// Vector bitwise inclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
simd_or(a, b)
}
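// Hand-written illustrative sketch, not emitted by stdarch-gen: vorrq_u32 forms
// the lane-wise bitwise union of its operands, e.g. for merging flag masks.
#[cfg(test)]
#[allow(unused)]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
unsafe fn vorrq_u32_sketch() {
    let a: uint32x4_t = transmute(u32x4::new(0x0000_00FF, 0, 0xF000_0000, 1));
    let b: uint32x4_t = transmute(u32x4::new(0x0000_FF00, 0, 0x0000_000F, 1));
    // Each result lane has every bit that is set in either input lane.
    let r: u32x4 = transmute(vorrq_u32(a, b));
    assert_eq!(r, u32x4::new(0x0000_FFFF, 0, 0xF000_000F, 1));
}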
/// Vector bitwise exclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
simd_xor(a, b)
}
/// Vector bitwise exclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
simd_xor(a, b)
}
/// Vector bitwise exclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
simd_xor(a, b)
}
/// Vector bitwise exclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
simd_xor(a, b)
}
/// Vector bitwise exclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
simd_xor(a, b)
}
/// Vector bitwise exclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
simd_xor(a, b)
}
/// Vector bitwise exclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
simd_xor(a, b)
}
/// Vector bitwise exclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
simd_xor(a, b)
}
/// Vector bitwise exclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
simd_xor(a, b)
}
/// Vector bitwise exclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
simd_xor(a, b)
}
/// Vector bitwise exclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
simd_xor(a, b)
}
/// Vector bitwise exclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
simd_xor(a, b)
}
/// Vector bitwise exclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
simd_xor(a, b)
}
/// Vector bitwise exclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
simd_xor(a, b)
}
/// Vector bitwise exclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
simd_xor(a, b)
}
/// Vector bitwise exclusive or (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
simd_xor(a, b)
}
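// Hand-written illustrative sketch, not emitted by stdarch-gen: XOR-ing a
// vector with itself clears every lane, the classic idiom for zeroing a register.
#[cfg(test)]
#[allow(unused)]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
unsafe fn veor_u8_sketch() {
    let a: uint8x8_t = transmute(u8x8::new(1, 2, 3, 4, 5, 6, 7, 8));
    let r: u8x8 = transmute(veor_u8(a, a));
    assert_eq!(r, u8x8::new(0, 0, 0, 0, 0, 0, 0, 0));
}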
/// Absolute difference between the arguments
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sabd.v8i8")]
fn vabd_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t;
}
vabd_s8_(a, b)
}
/// Absolute difference between the arguments
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sabd.v16i8")]
fn vabdq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t;
}
vabdq_s8_(a, b)
}
/// Absolute difference between the arguments
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sabd.v4i16")]
fn vabd_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t;
}
vabd_s16_(a, b)
}
/// Absolute difference between the arguments
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sabd.v8i16")]
fn vabdq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
}
vabdq_s16_(a, b)
}
/// Absolute difference between the arguments
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sabd.v2i32")]
fn vabd_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t;
}
vabd_s32_(a, b)
}
/// Absolute difference between the arguments
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sabd.v4i32")]
fn vabdq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
}
vabdq_s32_(a, b)
}
/// Absolute difference between the arguments
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uabd.v8i8")]
fn vabd_u8_(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t;
}
vabd_u8_(a, b)
}
/// Absolute difference between the arguments
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uabd.v16i8")]
fn vabdq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
}
vabdq_u8_(a, b)
}
/// Absolute difference between the arguments
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uabd.v4i16")]
fn vabd_u16_(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t;
}
vabd_u16_(a, b)
}
/// Absolute difference between the arguments
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uabd.v8i16")]
fn vabdq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
}
vabdq_u16_(a, b)
}
/// Absolute difference between the arguments
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uabd.v2i32")]
fn vabd_u32_(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t;
}
vabd_u32_(a, b)
}
/// Absolute difference between the arguments
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabdu.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uabd.v4i32")]
fn vabdq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
}
vabdq_u32_(a, b)
}
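// Hand-written illustrative sketch, not emitted by stdarch-gen: vabd computes
// |a - b| per lane, discarding the sign of the difference.
#[cfg(test)]
#[allow(unused)]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
unsafe fn vabd_s8_sketch() {
    let a: int8x8_t = transmute(i8x8::new(1, 2, 3, 4, 5, 6, 7, 8));
    let b: int8x8_t = transmute(i8x8::new(8, 7, 6, 5, 4, 3, 2, 1));
    // Lane 0 is |1 - 8| = 7, lane 1 is |2 - 7| = 5, and so on.
    let r: i8x8 = transmute(vabd_s8(a, b));
    assert_eq!(r, i8x8::new(7, 5, 3, 1, 1, 3, 5, 7));
}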
/// Absolute difference between the arguments (floating-point)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fabd.v2f32")]
fn vabd_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
}
vabd_f32_(a, b)
}
/// Absolute difference between the arguments (floating-point)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vabds.v4f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fabd.v4f32")]
fn vabdq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
}
vabdq_f32_(a, b)
}
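// Hand-written illustrative sketch, not emitted by stdarch-gen: the
// floating-point form likewise yields |a - b| in each lane.
#[cfg(test)]
#[allow(unused)]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
unsafe fn vabd_f32_sketch() {
    let a: float32x2_t = transmute(f32x2::new(1.5, -2.0));
    let b: float32x2_t = transmute(f32x2::new(-1.5, 2.0));
    let r: f32x2 = transmute(vabd_f32(a, b));
    assert_eq!(r, f32x2::new(3.0, 4.0));
}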
/// Unsigned Absolute Difference Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabdl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t {
simd_cast(vabd_u8(a, b))
}
/// Unsigned Absolute Difference Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabdl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t {
simd_cast(vabd_u16(a, b))
}
/// Unsigned Absolute Difference Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabdl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
simd_cast(vabd_u32(a, b))
}
/// Signed Absolute Difference Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabdl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t {
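    // The lane-wise absolute difference always fits in 0..=255, so the i8 result
    // is reinterpreted as u8 first and then zero-extended into the i16 lanes.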
let c: uint8x8_t = simd_cast(vabd_s8(a, b));
simd_cast(c)
}
/// Signed Absolute Difference Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabdl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
let c: uint16x4_t = simd_cast(vabd_s16(a, b));
simd_cast(c)
}
/// Signed Absolute Difference Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabdl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t {
let c: uint32x2_t = simd_cast(vabd_s32(a, b));
simd_cast(c)
}
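// Hand-written illustrative sketch, not emitted by stdarch-gen: the "long"
// forms widen the result, so a u8 difference as large as 255 is preserved
// exactly in the u16 lanes instead of being truncated.
#[cfg(test)]
#[allow(unused)]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
unsafe fn vabdl_u8_sketch() {
    let a: uint8x8_t = transmute(u8x8::new(0, 255, 10, 20, 30, 40, 50, 60));
    let b: uint8x8_t = transmute(u8x8::new(255, 0, 20, 10, 40, 30, 60, 50));
    let r: u16x8 = transmute(vabdl_u8(a, b));
    assert_eq!(r, u16x8::new(255, 255, 10, 10, 10, 10, 10, 10));
}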
/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
simd_eq(a, b)
}
/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
simd_eq(a, b)
}
/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
simd_eq(a, b)
}
/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
simd_eq(a, b)
}
/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
simd_eq(a, b)
}
/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
simd_eq(a, b)
}
/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
simd_eq(a, b)
}
/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
simd_eq(a, b)
}
/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
simd_eq(a, b)
}
/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
simd_eq(a, b)
}
/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
simd_eq(a, b)
}
/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
simd_eq(a, b)
}
/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t {
simd_eq(a, b)
}
/// Compare bitwise Equal (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t {
simd_eq(a, b)
}
/// Floating-point compare equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmeq))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
simd_eq(a, b)
}
/// Floating-point compare equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmeq))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
simd_eq(a, b)
}
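// Hand-written illustrative sketch, not emitted by stdarch-gen: vceq produces
// an all-ones lane (0xFF here) where the operands match and zero elsewhere,
// so the result composes directly with vand/vorr for branchless selection.
#[cfg(test)]
#[allow(unused)]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
unsafe fn vceq_u8_sketch() {
    let a: uint8x8_t = transmute(u8x8::new(1, 2, 3, 4, 5, 6, 7, 8));
    let b: uint8x8_t = transmute(u8x8::new(1, 0, 3, 0, 5, 0, 7, 0));
    let r: u8x8 = transmute(vceq_u8(a, b));
    assert_eq!(r, u8x8::new(0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0));
}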
/// Signed compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
let c: int8x8_t = simd_and(a, b);
let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
simd_ne(c, transmute(d))
}
/// Signed compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
let c: int8x16_t = simd_and(a, b);
let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
simd_ne(c, transmute(d))
}
/// Signed compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
let c: int16x4_t = simd_and(a, b);
let d: i16x4 = i16x4::new(0, 0, 0, 0);
simd_ne(c, transmute(d))
}
/// Signed compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
let c: int16x8_t = simd_and(a, b);
let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
simd_ne(c, transmute(d))
}
/// Signed compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
let c: int32x2_t = simd_and(a, b);
let d: i32x2 = i32x2::new(0, 0);
simd_ne(c, transmute(d))
}
/// Signed compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
let c: int32x4_t = simd_and(a, b);
let d: i32x4 = i32x4::new(0, 0, 0, 0);
simd_ne(c, transmute(d))
}
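// Hand-written illustrative sketch, not emitted by stdarch-gen: vtst is a
// lane-wise "(a & b) != 0" test, returning an all-ones mask wherever the two
// operands share at least one set bit.
#[cfg(test)]
#[allow(unused)]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
unsafe fn vtst_s8_sketch() {
    let a: int8x8_t = transmute(i8x8::new(1, 2, 4, 0, 1, 2, 4, 8));
    let b: int8x8_t = transmute(i8x8::new(1, 1, 6, 0, 3, 1, 4, 7));
    // Lanes 0, 2, 4, and 6 overlap in at least one bit; the rest do not.
    let r: u8x8 = transmute(vtst_s8(a, b));
    assert_eq!(r, u8x8::new(0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0));
}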
/// Compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t {
let c: poly8x8_t = simd_and(a, b);
let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
simd_ne(c, transmute(d))
}
/// Compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t {
let c: poly8x16_t = simd_and(a, b);
let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
simd_ne(c, transmute(d))
}
/// Compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t {
let c: poly16x4_t = simd_and(a, b);
let d: i16x4 = i16x4::new(0, 0, 0, 0);
simd_ne(c, transmute(d))
}
/// Compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t {
let c: poly16x8_t = simd_and(a, b);
let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
simd_ne(c, transmute(d))
}
/// Unsigned compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
let c: uint8x8_t = simd_and(a, b);
let d: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
simd_ne(c, transmute(d))
}
/// Unsigned compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
let c: uint8x16_t = simd_and(a, b);
let d: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
simd_ne(c, transmute(d))
}
/// Unsigned compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
let c: uint16x4_t = simd_and(a, b);
let d: u16x4 = u16x4::new(0, 0, 0, 0);
simd_ne(c, transmute(d))
}
/// Unsigned compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
let c: uint16x8_t = simd_and(a, b);
let d: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
simd_ne(c, transmute(d))
}
/// Unsigned compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
let c: uint32x2_t = simd_and(a, b);
let d: u32x2 = u32x2::new(0, 0);
simd_ne(c, transmute(d))
}
/// Unsigned compare bitwise Test bits nonzero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
let c: uint32x4_t = simd_and(a, b);
let d: u32x4 = u32x4::new(0, 0, 0, 0);
simd_ne(c, transmute(d))
}
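// Hand-written usage sketch, not generated from neon.spec: `vtst*` yields an
// all-ones lane wherever `a & b` is nonzero and an all-zeros lane otherwise.
// Assumes an AArch64 test target (where NEON is baseline) and the `vdup_n_u32`
// broadcast intrinsic defined elsewhere in this module.
#[cfg(all(test, target_arch = "aarch64"))]
mod vtst_usage_sketch {
use super::*;
#[test]
fn vtst_u32_flags_lanes_with_common_bits() {
unsafe {
let a = vdup_n_u32(0b0110);
// Both lanes share bit 2 with `a`, so the mask is all ones.
let all_set: u64 = transmute(vtst_u32(a, vdup_n_u32(0b0100)));
assert_eq!(all_set, u64::MAX);
// No bits in common, so the mask is all zeros.
let none_set: u64 = transmute(vtst_u32(a, vdup_n_u32(0b1000)));
assert_eq!(none_set, 0);
}
}
}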
/// Floating-point absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t {
simd_fabs(a)
}
/// Floating-point absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t {
simd_fabs(a)
}
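// Hand-written sketch (not generated; assumes an AArch64 test target and
// `vdup_n_f32` from this module): `vabs_f32` clears the sign bit of each lane.
#[cfg(all(test, target_arch = "aarch64"))]
mod vabs_usage_sketch {
use super::*;
#[test]
fn vabs_f32_drops_the_sign() {
unsafe {
let r: [f32; 2] = transmute(vabs_f32(vdup_n_f32(-1.5)));
assert_eq!(r, [1.5, 1.5]);
}
}
}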
/// Compare signed greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
simd_gt(a, b)
}
/// Compare signed greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
simd_gt(a, b)
}
/// Compare signed greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
simd_gt(a, b)
}
/// Compare signed greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
simd_gt(a, b)
}
/// Compare signed greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
simd_gt(a, b)
}
/// Compare signed greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
simd_gt(a, b)
}
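// Hand-written sketch (not generated): the `vcgt_s*` comparisons are signed,
// which matters for bit patterns with the high bit set. Assumes an AArch64
// test target and `vdup_n_s32` from this module.
#[cfg(all(test, target_arch = "aarch64"))]
mod vcgt_signed_usage_sketch {
use super::*;
#[test]
fn vcgt_s32_is_a_signed_comparison() {
unsafe {
// -1 > 1 is false for signed lanes, so the mask is all zeros.
let mask: u64 = transmute(vcgt_s32(vdup_n_s32(-1), vdup_n_s32(1)));
assert_eq!(mask, 0);
}
}
}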
/// Compare unsigned higher
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
simd_gt(a, b)
}
/// Compare unsigned higher
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
simd_gt(a, b)
}
/// Compare unsigned higher
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
simd_gt(a, b)
}
/// Compare unsigned higher
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
simd_gt(a, b)
}
/// Compare unsigned higher
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
simd_gt(a, b)
}
/// Compare unsigned higher
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
simd_gt(a, b)
}
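// Hand-written sketch (not generated): the `vcgt_u*` comparisons are unsigned
// ("higher"), so the same bit pattern can compare the opposite way from the
// signed variants above. Assumes an AArch64 test target and `vdup_n_u32`.
#[cfg(all(test, target_arch = "aarch64"))]
mod vcgt_unsigned_usage_sketch {
use super::*;
#[test]
fn vcgt_u32_treats_lanes_as_unsigned() {
unsafe {
// u32::MAX is the largest unsigned value, even though the same bits are -1i32.
let mask: u64 = transmute(vcgt_u32(vdup_n_u32(u32::MAX), vdup_n_u32(1)));
assert_eq!(mask, u64::MAX);
}
}
}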
/// Floating-point compare greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
simd_gt(a, b)
}
/// Floating-point compare greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
simd_gt(a, b)
}
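// Hand-written sketch (not generated): the floating-point comparison is
// ordered, so lanes involving NaN always produce a zero mask. Assumes an
// AArch64 test target and `vdup_n_f32` from this module.
#[cfg(all(test, target_arch = "aarch64"))]
mod vcgt_float_usage_sketch {
use super::*;
#[test]
fn vcgt_f32_is_false_for_nan_lanes() {
unsafe {
let mask: u64 = transmute(vcgt_f32(vdup_n_f32(2.0), vdup_n_f32(1.0)));
assert_eq!(mask, u64::MAX);
let nan_mask: u64 = transmute(vcgt_f32(vdup_n_f32(f32::NAN), vdup_n_f32(1.0)));
assert_eq!(nan_mask, 0);
}
}
}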
/// Compare signed less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
simd_lt(a, b)
}
/// Compare signed less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
simd_lt(a, b)
}
/// Compare signed less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
simd_lt(a, b)
}
/// Compare signed less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
simd_lt(a, b)
}
/// Compare signed less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
simd_lt(a, b)
}
/// Compare signed less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
simd_lt(a, b)
}
/// Compare unsigned less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
simd_lt(a, b)
}
/// Compare unsigned less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
simd_lt(a, b)
}
/// Compare unsigned less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
simd_lt(a, b)
}
/// Compare unsigned less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
simd_lt(a, b)
}
/// Compare unsigned less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
simd_lt(a, b)
}
/// Compare unsigned less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
simd_lt(a, b)
}
/// Floating-point compare less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
simd_lt(a, b)
}
/// Floating-point compare less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
simd_lt(a, b)
}
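// Hand-written sketch (not generated): the `assert_instr` attributes above
// name `vcgt`/`cmgt` because less-than is lowered as greater-than with the
// operands swapped; the two forms are equivalent. Assumes an AArch64 test
// target and `vdup_n_s32` from this module.
#[cfg(all(test, target_arch = "aarch64"))]
mod vclt_usage_sketch {
use super::*;
#[test]
fn vclt_matches_swapped_vcgt() {
unsafe {
let a = vdup_n_s32(3);
let b = vdup_n_s32(7);
let lt: u64 = transmute(vclt_s32(a, b));
let gt_swapped: u64 = transmute(vcgt_s32(b, a));
assert_eq!(lt, gt_swapped);
assert_eq!(lt, u64::MAX);
}
}
}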
/// Compare signed less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
simd_le(a, b)
}
/// Compare signed less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
simd_le(a, b)
}
/// Compare signed less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
simd_le(a, b)
}
/// Compare signed less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
simd_le(a, b)
}
/// Compare signed less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
simd_le(a, b)
}
/// Compare signed less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
simd_le(a, b)
}
/// Compare unsigned less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
simd_le(a, b)
}
/// Compare unsigned less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
simd_le(a, b)
}
/// Compare unsigned less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
simd_le(a, b)
}
/// Compare unsigned less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
simd_le(a, b)
}
/// Compare unsigned less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
simd_le(a, b)
}
/// Compare unsigned less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
simd_le(a, b)
}
/// Floating-point compare less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
simd_le(a, b)
}
/// Floating-point compare less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
simd_le(a, b)
}
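// Hand-written sketch (not generated): unlike `vclt*`, the `vcle*` mask is
// set for equal lanes as well. Assumes an AArch64 test target and
// `vdup_n_s32` from this module.
#[cfg(all(test, target_arch = "aarch64"))]
mod vcle_usage_sketch {
use super::*;
#[test]
fn vcle_s32_includes_equality() {
unsafe {
let eq: u64 = transmute(vcle_s32(vdup_n_s32(5), vdup_n_s32(5)));
assert_eq!(eq, u64::MAX);
let gt: u64 = transmute(vcle_s32(vdup_n_s32(6), vdup_n_s32(5)));
assert_eq!(gt, 0);
}
}
}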
/// Compare signed greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t {
simd_ge(a, b)
}
/// Compare signed greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t {
simd_ge(a, b)
}
/// Compare signed greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t {
simd_ge(a, b)
}
/// Compare signed greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t {
simd_ge(a, b)
}
/// Compare signed greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t {
simd_ge(a, b)
}
/// Compare signed greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
simd_ge(a, b)
}
/// Compare unsigned greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
simd_ge(a, b)
}
/// Compare unsigned greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
simd_ge(a, b)
}
/// Compare unsigned greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
simd_ge(a, b)
}
/// Compare unsigned greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
simd_ge(a, b)
}
/// Compare unsigned greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
simd_ge(a, b)
}
/// Compare unsigned greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
simd_ge(a, b)
}
/// Floating-point compare greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
simd_ge(a, b)
}
/// Floating-point compare greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
simd_ge(a, b)
}
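// Hand-written sketch (not generated): `vcge*` likewise sets the mask for
// equal lanes. Assumes an AArch64 test target and `vdup_n_f32` from this
// module.
#[cfg(all(test, target_arch = "aarch64"))]
mod vcge_usage_sketch {
use super::*;
#[test]
fn vcge_f32_includes_equality() {
unsafe {
let mask: u64 = transmute(vcge_f32(vdup_n_f32(1.0), vdup_n_f32(1.0)));
assert_eq!(mask, u64::MAX);
}
}
}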
/// Count leading sign bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.cls.v8i8")]
fn vcls_s8_(a: int8x8_t) -> int8x8_t;
}
vcls_s8_(a)
}
/// Count leading sign bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.cls.v16i8")]
fn vclsq_s8_(a: int8x16_t) -> int8x16_t;
}
vclsq_s8_(a)
}
/// Count leading sign bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.cls.v4i16")]
fn vcls_s16_(a: int16x4_t) -> int16x4_t;
}
vcls_s16_(a)
}
/// Count leading sign bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.cls.v8i16")]
fn vclsq_s16_(a: int16x8_t) -> int16x8_t;
}
vclsq_s16_(a)
}
/// Count leading sign bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.cls.v2i32")]
fn vcls_s32_(a: int32x2_t) -> int32x2_t;
}
vcls_s32_(a)
}
/// Count leading sign bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcls.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.cls.v4i32")]
fn vclsq_s32_(a: int32x4_t) -> int32x4_t;
}
vclsq_s32_(a)
}
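// Hand-written sketch (not generated): CLS counts how many consecutive bits
// immediately below the sign bit match it, excluding the sign bit itself.
// Assumes an AArch64 test target and `vdup_n_s8` from this module.
#[cfg(all(test, target_arch = "aarch64"))]
mod vcls_usage_sketch {
use super::*;
#[test]
fn vcls_s8_counts_redundant_sign_bits() {
unsafe {
// 1 = 0b0000_0001: the six bits after the sign bit match it.
let r: [i8; 8] = transmute(vcls_s8(vdup_n_s8(1)));
assert_eq!(r, [6; 8]);
// -1 = 0b1111_1111: all seven bits after the sign bit match it.
let r: [i8; 8] = transmute(vcls_s8(vdup_n_s8(-1)));
assert_eq!(r, [7; 8]);
}
}
}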
/// Count leading sign bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t {
transmute(vcls_s8(transmute(a)))
}
/// Count leading sign bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t {
transmute(vclsq_s8(transmute(a)))
}
/// Count leading sign bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t {
transmute(vcls_s16(transmute(a)))
}
/// Count leading sign bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t {
transmute(vclsq_s16(transmute(a)))
}
/// Count leading sign bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t {
transmute(vcls_s32(transmute(a)))
}
/// Count leading sign bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t {
transmute(vclsq_s32(transmute(a)))
}
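// Hand-written sketch (not generated): the unsigned `vcls*` variants simply
// reinterpret the bit pattern and run the signed count, as the `transmute`
// bodies above show. Assumes an AArch64 test target and `vdup_n_u8`.
#[cfg(all(test, target_arch = "aarch64"))]
mod vcls_unsigned_usage_sketch {
use super::*;
#[test]
fn vcls_u8_reinterprets_the_bit_pattern() {
unsafe {
// 0xFF has the same bit pattern as -1i8, so the count is 7.
let r: [i8; 8] = transmute(vcls_u8(vdup_n_u8(0xFF)));
assert_eq!(r, [7; 8]);
}
}
}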
/// Count leading zero bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t {
vclz_s8_(a)
}
/// Count leading zero bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t {
vclzq_s8_(a)
}
/// Count leading zero bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t {
vclz_s16_(a)
}
/// Count leading zero bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t {
vclzq_s16_(a)
}
/// Count leading zero bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t {
vclz_s32_(a)
}
/// Count leading zero bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t {
vclzq_s32_(a)
}
/// Count leading zero bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t {
transmute(vclz_s8_(transmute(a)))
}
/// Count leading zero bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t {
transmute(vclzq_s8_(transmute(a)))
}
/// Count leading zero bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t {
transmute(vclz_s16_(transmute(a)))
}
/// Count leading zero bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t {
transmute(vclzq_s16_(transmute(a)))
}
/// Count leading zero bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t {
transmute(vclz_s32_(transmute(a)))
}
/// Count leading zero bits
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t {
transmute(vclzq_s32_(transmute(a)))
}
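// Hand-written sketch (not generated): CLZ counts zero bits from the most
// significant end; an all-zero lane reports the full lane width. Assumes an
// AArch64 test target and `vdup_n_u8` from this module.
#[cfg(all(test, target_arch = "aarch64"))]
mod vclz_usage_sketch {
use super::*;
#[test]
fn vclz_u8_counts_high_zero_bits() {
unsafe {
// 0x10 = 0b0001_0000 has three leading zeros.
let r: [u8; 8] = transmute(vclz_u8(vdup_n_u8(0x10)));
assert_eq!(r, [3; 8]);
let r: [u8; 8] = transmute(vclz_u8(vdup_n_u8(0)));
assert_eq!(r, [8; 8]);
}
}
}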
/// Floating-point absolute compare greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v2i32.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facgt.v2i32.v2f32")]
fn vcagt_f32_(a: float32x2_t, b: float32x2_t) -> uint32x2_t;
}
vcagt_f32_(a, b)
}
/// Floating-point absolute compare greater than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacgt.v4i32.v4f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facgt.v4i32.v4f32")]
fn vcagtq_f32_(a: float32x4_t, b: float32x4_t) -> uint32x4_t;
}
vcagtq_f32_(a, b)
}
/// Floating-point absolute compare greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v2i32.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facge.v2i32.v2f32")]
fn vcage_f32_(a: float32x2_t, b: float32x2_t) -> uint32x2_t;
}
vcage_f32_(a, b)
}
/// Floating-point absolute compare greater than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vacge.v4i32.v4f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.facge.v4i32.v4f32")]
fn vcageq_f32_(a: float32x4_t, b: float32x4_t) -> uint32x4_t;
}
vcageq_f32_(a, b)
}
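// Hand-written sketch (not generated): the "absolute" comparisons operate on
// magnitudes, so the sign of either operand is ignored. Assumes an AArch64
// test target and `vdup_n_f32` from this module.
#[cfg(all(test, target_arch = "aarch64"))]
mod vcagt_usage_sketch {
use super::*;
#[test]
fn vcagt_f32_compares_magnitudes() {
unsafe {
// |-3.0| > |2.0|, even though -3.0 < 2.0 in an ordinary comparison.
let mask: u64 = transmute(vcagt_f32(vdup_n_f32(-3.0), vdup_n_f32(2.0)));
assert_eq!(mask, u64::MAX);
}
}
}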
/// Floating-point absolute compare less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
vcagt_f32(b, a)
}
/// Floating-point absolute compare less than
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
vcagtq_f32(b, a)
}
/// Floating-point absolute compare less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
vcage_f32(b, a)
}
/// Floating-point absolute compare less than or equal
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
vcageq_f32(b, a)
}
/// Create a vector from a 64-bit bit pattern
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)
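///
/// # Examples
///
/// A minimal sketch with a hypothetical bit pattern, assuming an AArch64
/// target with NEON available; lane 0 is taken from the least significant
/// byte of the `u64`:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let v = vcreate_s8(0x0807_0605_0403_0201);
///     let mut out = [0i8; 8];
///     vst1_s8(out.as_mut_ptr(), v);
///     assert_eq!(out, [1, 2, 3, 4, 5, 6, 7, 8]);
/// }
/// ```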
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_s8(a: u64) -> int8x8_t {
transmute(a)
}
/// Create a vector from a 64-bit bit pattern
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_s16(a: u64) -> int16x4_t {
transmute(a)
}
/// Create a vector from a 64-bit bit pattern
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_s32(a: u64) -> int32x2_t {
transmute(a)
}
/// Create a vector from a 64-bit bit pattern
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_s64(a: u64) -> int64x1_t {
transmute(a)
}
/// Create a vector from a 64-bit bit pattern
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t {
transmute(a)
}
/// Create a vector from a 64-bit bit pattern
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t {
transmute(a)
}
/// Create a vector from a 64-bit bit pattern
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t {
transmute(a)
}
/// Create a vector from a 64-bit bit pattern
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t {
transmute(a)
}
/// Create a vector from a 64-bit bit pattern
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t {
transmute(a)
}
/// Create a vector from a 64-bit bit pattern
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t {
transmute(a)
}
/// Create a vector from a 64-bit bit pattern
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t {
transmute(a)
}
/// Create a vector from a 64-bit bit pattern
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_f32(a: u64) -> float32x2_t {
transmute(a)
}
/// Signed fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_s32)
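///
/// # Examples
///
/// A minimal sketch with hypothetical values, assuming an AArch64 target
/// with NEON available:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let a = vld1_s32([-2i32, 7].as_ptr());
///     let f = vcvt_f32_s32(a);
///     let mut out = [0.0f32; 2];
///     vst1_f32(out.as_mut_ptr(), f);
///     assert_eq!(out, [-2.0, 7.0]);
/// }
/// ```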
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(scvtf))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t {
simd_cast(a)
}
/// Signed fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(scvtf))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t {
simd_cast(a)
}
/// Unsigned fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ucvtf))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t {
simd_cast(a)
}
/// Unsigned fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ucvtf))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t {
simd_cast(a)
}
/// Signed fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vcvt, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vcvt_n_f32_s32<const N: i32>(a: int32x2_t) -> float32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32")]
fn vcvt_n_f32_s32_(a: int32x2_t, n: i32) -> float32x2_t;
}
vcvt_n_f32_s32_(a, N)
}
/// Signed fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)
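///
/// # Examples
///
/// A minimal sketch with hypothetical values, assuming an AArch64 target
/// with NEON available; `N` is the number of fractional bits, so each
/// lane is effectively divided by `2^N`:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     // With N = 2 fractional bits: 8 / 4 = 2.0 and -6 / 4 = -1.5.
///     let a = vld1_s32([8i32, -6].as_ptr());
///     let f = vcvt_n_f32_s32::<2>(a);
///     let mut out = [0.0f32; 2];
///     vst1_f32(out.as_mut_ptr(), f);
///     assert_eq!(out, [2.0, -1.5]);
/// }
/// ```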
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_f32_s32<const N: i32>(a: int32x2_t) -> float32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32")]
fn vcvt_n_f32_s32_(a: int32x2_t, n: i32) -> float32x2_t;
}
vcvt_n_f32_s32_(a, N)
}
/// Signed fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vcvt, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vcvtq_n_f32_s32<const N: i32>(a: int32x4_t) -> float32x4_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32")]
fn vcvtq_n_f32_s32_(a: int32x4_t, n: i32) -> float32x4_t;
}
vcvtq_n_f32_s32_(a, N)
}
/// Signed fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_n_f32_s32<const N: i32>(a: int32x4_t) -> float32x4_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32")]
fn vcvtq_n_f32_s32_(a: int32x4_t, n: i32) -> float32x4_t;
}
vcvtq_n_f32_s32_(a, N)
}
/// Unsigned fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vcvt, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vcvt_n_f32_u32<const N: i32>(a: uint32x2_t) -> float32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32")]
fn vcvt_n_f32_u32_(a: uint32x2_t, n: i32) -> float32x2_t;
}
vcvt_n_f32_u32_(a, N)
}
/// Unsigned fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_f32_u32<const N: i32>(a: uint32x2_t) -> float32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32")]
fn vcvt_n_f32_u32_(a: uint32x2_t, n: i32) -> float32x2_t;
}
vcvt_n_f32_u32_(a, N)
}
/// Unsigned fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vcvt, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vcvtq_n_f32_u32<const N: i32>(a: uint32x4_t) -> float32x4_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32")]
fn vcvtq_n_f32_u32_(a: uint32x4_t, n: i32) -> float32x4_t;
}
vcvtq_n_f32_u32_(a, N)
}
/// Unsigned fixed-point convert to floating-point
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_n_f32_u32<const N: i32>(a: uint32x4_t) -> float32x4_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32")]
fn vcvtq_n_f32_u32_(a: uint32x4_t, n: i32) -> float32x4_t;
}
vcvtq_n_f32_u32_(a, N)
}
/// Floating-point convert to signed fixed-point, rounding toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vcvt, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vcvt_n_s32_f32<const N: i32>(a: float32x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32")]
fn vcvt_n_s32_f32_(a: float32x2_t, n: i32) -> int32x2_t;
}
vcvt_n_s32_f32_(a, N)
}
/// Floating-point convert to signed fixed-point, rounding toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)
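///
/// # Examples
///
/// A minimal sketch with hypothetical values, assuming an AArch64 target
/// with NEON available; each lane is scaled by `2^N` and then truncated
/// toward zero:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     // With N = 2: 1.75 * 4 = 7.0 -> 7, and -0.3 * 4 = -1.2 -> -1.
///     let a = vld1_f32([1.75f32, -0.3].as_ptr());
///     let q = vcvt_n_s32_f32::<2>(a);
///     let mut out = [0i32; 2];
///     vst1_s32(out.as_mut_ptr(), q);
///     assert_eq!(out, [7, -1]);
/// }
/// ```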
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_s32_f32<const N: i32>(a: float32x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32")]
fn vcvt_n_s32_f32_(a: float32x2_t, n: i32) -> int32x2_t;
}
vcvt_n_s32_f32_(a, N)
}
/// Floating-point convert to signed fixed-point, rounding toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vcvt, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vcvtq_n_s32_f32<const N: i32>(a: float32x4_t) -> int32x4_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32")]
fn vcvtq_n_s32_f32_(a: float32x4_t, n: i32) -> int32x4_t;
}
vcvtq_n_s32_f32_(a, N)
}
/// Floating-point convert to signed fixed-point, rounding toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_n_s32_f32<const N: i32>(a: float32x4_t) -> int32x4_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32")]
fn vcvtq_n_s32_f32_(a: float32x4_t, n: i32) -> int32x4_t;
}
vcvtq_n_s32_f32_(a, N)
}
/// Floating-point convert to unsigned fixed-point, rounding toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vcvt, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vcvt_n_u32_f32<const N: i32>(a: float32x2_t) -> uint32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32")]
fn vcvt_n_u32_f32_(a: float32x2_t, n: i32) -> uint32x2_t;
}
vcvt_n_u32_f32_(a, N)
}
/// Floating-point convert to unsigned fixed-point, rounding toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_u32_f32<const N: i32>(a: float32x2_t) -> uint32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32")]
fn vcvt_n_u32_f32_(a: float32x2_t, n: i32) -> uint32x2_t;
}
vcvt_n_u32_f32_(a, N)
}
/// Floating-point convert to unsigned fixed-point, rounding toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vcvt, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vcvtq_n_u32_f32<const N: i32>(a: float32x4_t) -> uint32x4_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32")]
fn vcvtq_n_u32_f32_(a: float32x4_t, n: i32) -> uint32x4_t;
}
vcvtq_n_u32_f32_(a, N)
}
/// Floating-point convert to unsigned fixed-point, rounding toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_n_u32_f32<const N: i32>(a: float32x4_t) -> uint32x4_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32")]
fn vcvtq_n_u32_f32_(a: float32x4_t, n: i32) -> uint32x4_t;
}
vcvtq_n_u32_f32_(a, N)
}
/// Floating-point convert to signed fixed-point, rounding toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s32_f32)
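///
/// # Examples
///
/// A minimal sketch with hypothetical values, assuming an AArch64 target
/// with NEON available:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let a = vld1_f32([2.9f32, -2.9].as_ptr());
///     // Truncates toward zero in both directions.
///     let q = vcvt_s32_f32(a);
///     let mut out = [0i32; 2];
///     vst1_s32(out.as_mut_ptr(), q);
///     assert_eq!(out, [2, -2]);
/// }
/// ```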
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v2i32.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.fptosi.sat.v2i32.v2f32")]
fn vcvt_s32_f32_(a: float32x2_t) -> int32x2_t;
}
vcvt_s32_f32_(a)
}
/// Floating-point convert to signed fixed-point, rounding toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.fptosi.sat.v4i32.v4f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.fptosi.sat.v4i32.v4f32")]
fn vcvtq_s32_f32_(a: float32x4_t) -> int32x4_t;
}
vcvtq_s32_f32_(a)
}
/// Floating-point convert to unsigned fixed-point, rounding toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u32_f32)
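///
/// # Examples
///
/// A minimal sketch with hypothetical values, assuming an AArch64 target
/// with NEON available; the conversion saturates, so negative inputs
/// clamp to zero:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let a = vld1_f32([3.7f32, -1.0].as_ptr());
///     let q = vcvt_u32_f32(a);
///     let mut out = [0u32; 2];
///     vst1_u32(out.as_mut_ptr(), q);
///     assert_eq!(out, [3, 0]);
/// }
/// ```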
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzu))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v2i32.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.fptoui.sat.v2i32.v2f32")]
fn vcvt_u32_f32_(a: float32x2_t) -> uint32x2_t;
}
vcvt_u32_f32_(a)
}
/// Floating-point convert to unsigned fixed-point, rounding toward zero
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzu))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.fptoui.sat.v4i32.v4f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.fptoui.sat.v4i32.v4f32")]
fn vcvtq_u32_f32_(a: float32x4_t) -> uint32x4_t;
}
vcvtq_u32_f32_(a)
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s8)
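///
/// # Examples
///
/// A minimal sketch with hypothetical values, assuming an AArch64 target
/// with NEON available:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let a = vld1_s8([10i8, 20, 30, 40, 50, 60, 70, 80].as_ptr());
///     // Broadcast lane 4 (value 50) to every lane of the result.
///     let v = vdup_lane_s8::<4>(a);
///     let mut out = [0i8; 8];
///     vst1_s8(out.as_mut_ptr(), v);
///     assert_eq!(out, [50i8; 8]);
/// }
/// ```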
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
static_assert_imm3!(N);
simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
static_assert_imm4!(N);
simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
static_assert_imm2!(N);
simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
static_assert_imm3!(N);
simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
static_assert_imm1!(N);
simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
static_assert_imm2!(N);
simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_s8<const N: i32>(a: int8x16_t) -> int8x8_t {
static_assert_imm4!(N);
simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_s16<const N: i32>(a: int16x8_t) -> int16x4_t {
static_assert_imm3!(N);
simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s32)
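///
/// # Examples
///
/// A minimal sketch with hypothetical values, assuming an AArch64 target
/// with NEON available; the lane is taken from a 128-bit vector and
/// broadcast into a 64-bit one:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let a = vld1q_s32([1i32, 2, 3, 4].as_ptr());
///     let v = vdup_laneq_s32::<3>(a);
///     let mut out = [0i32; 2];
///     vst1_s32(out.as_mut_ptr(), v);
///     assert_eq!(out, [4, 4]);
/// }
/// ```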
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_s32<const N: i32>(a: int32x4_t) -> int32x2_t {
static_assert_imm2!(N);
simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_s8<const N: i32>(a: int8x8_t) -> int8x16_t {
static_assert_imm3!(N);
simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_s16<const N: i32>(a: int16x4_t) -> int16x8_t {
static_assert_imm2!(N);
simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s32)
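///
/// # Examples
///
/// A minimal sketch with hypothetical values, assuming an AArch64 target
/// with NEON available; the lane is taken from a 64-bit vector and
/// broadcast across a 128-bit one:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let a = vld1_s32([7i32, 9].as_ptr());
///     let v = vdupq_lane_s32::<1>(a);
///     let mut out = [0i32; 4];
///     vst1q_s32(out.as_mut_ptr(), v);
///     assert_eq!(out, [9i32; 4]);
/// }
/// ```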
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_s32<const N: i32>(a: int32x2_t) -> int32x4_t {
static_assert_imm1!(N);
simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
static_assert_imm3!(N);
simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
static_assert_imm4!(N);
simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
static_assert_imm2!(N);
simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
static_assert_imm3!(N);
simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
static_assert_imm1!(N);
simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
static_assert_imm2!(N);
simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x8_t {
static_assert_imm4!(N);
simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x4_t {
static_assert_imm3!(N);
simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_u32<const N: i32>(a: uint32x4_t) -> uint32x2_t {
static_assert_imm2!(N);
simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x16_t {
static_assert_imm3!(N);
simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x8_t {
static_assert_imm2!(N);
simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x4_t {
static_assert_imm1!(N);
simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x8_t {
static_assert_imm3!(N);
simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x16_t {
static_assert_imm4!(N);
simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x4_t {
static_assert_imm2!(N);
simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_p16<const N: i32>(a: poly16x8_t) -> poly16x8_t {
static_assert_imm3!(N);
simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x8_t {
static_assert_imm4!(N);
simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_p16<const N: i32>(a: poly16x8_t) -> poly16x4_t {
static_assert_imm3!(N);
simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x16_t {
static_assert_imm3!(N);
simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x8_t {
static_assert_imm2!(N);
simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
static_assert_imm1!(N);
simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_s64<const N: i32>(a: int64x1_t) -> int64x2_t {
static_assert!(N : i32 where N == 0);
simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
static_assert_imm1!(N);
simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x2_t {
static_assert!(N : i32 where N == 0);
simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_f32<const N: i32>(a: float32x2_t) -> float32x2_t {
static_assert_imm1!(N);
simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_f32<const N: i32>(a: float32x4_t) -> float32x4_t {
static_assert_imm2!(N);
simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_f32<const N: i32>(a: float32x4_t) -> float32x2_t {
static_assert_imm2!(N);
simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_f32<const N: i32>(a: float32x2_t) -> float32x4_t {
static_assert_imm1!(N);
simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
static_assert!(N : i32 where N == 0);
a
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
static_assert!(N : i32 where N == 0);
a
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_s64<const N: i32>(a: int64x2_t) -> int64x1_t {
static_assert_imm1!(N);
transmute::<i64, _>(simd_extract(a, N as u32))
}
/// Set all vector lanes to the same value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x1_t {
static_assert_imm1!(N);
transmute::<u64, _>(simd_extract(a, N as u32))
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s8)
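///
/// Conceptually concatenates `a` and `b` into a 16-lane vector and
/// returns the 8 consecutive lanes starting at lane `N`; e.g. with
/// `N = 3` the result is `[a3, a4, a5, a6, a7, b0, b1, b2]`.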
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
static_assert_imm3!(N);
match N & 0b111 {
0 => simd_shuffle8!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
1 => simd_shuffle8!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
2 => simd_shuffle8!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
3 => simd_shuffle8!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
4 => simd_shuffle8!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
5 => simd_shuffle8!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
6 => simd_shuffle8!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
7 => simd_shuffle8!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 15))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
static_assert_imm4!(N);
match N & 0b1111 {
0 => simd_shuffle16!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
1 => simd_shuffle16!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]),
2 => simd_shuffle16!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]),
3 => simd_shuffle16!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]),
4 => simd_shuffle16!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
5 => simd_shuffle16!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]),
6 => simd_shuffle16!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]),
7 => simd_shuffle16!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]),
8 => simd_shuffle16!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]),
9 => simd_shuffle16!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]),
10 => simd_shuffle16!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]),
11 => simd_shuffle16!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]),
12 => simd_shuffle16!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]),
13 => simd_shuffle16!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]),
14 => simd_shuffle16!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]),
15 => simd_shuffle16!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
static_assert_imm2!(N);
match N & 0b11 {
0 => simd_shuffle4!(a, b, [0, 1, 2, 3]),
1 => simd_shuffle4!(a, b, [1, 2, 3, 4]),
2 => simd_shuffle4!(a, b, [2, 3, 4, 5]),
3 => simd_shuffle4!(a, b, [3, 4, 5, 6]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
static_assert_imm3!(N);
match N & 0b111 {
0 => simd_shuffle8!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
1 => simd_shuffle8!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
2 => simd_shuffle8!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
3 => simd_shuffle8!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
4 => simd_shuffle8!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
5 => simd_shuffle8!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
6 => simd_shuffle8!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
7 => simd_shuffle8!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
static_assert_imm1!(N);
match N & 0b1 {
0 => simd_shuffle2!(a, b, [0, 1]),
1 => simd_shuffle2!(a, b, [1, 2]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
static_assert_imm2!(N);
match N & 0b11 {
0 => simd_shuffle4!(a, b, [0, 1, 2, 3]),
1 => simd_shuffle4!(a, b, [1, 2, 3, 4]),
2 => simd_shuffle4!(a, b, [2, 3, 4, 5]),
3 => simd_shuffle4!(a, b, [3, 4, 5, 6]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
static_assert_imm3!(N);
match N & 0b111 {
0 => simd_shuffle8!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
1 => simd_shuffle8!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
2 => simd_shuffle8!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
3 => simd_shuffle8!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
4 => simd_shuffle8!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
5 => simd_shuffle8!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
6 => simd_shuffle8!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
7 => simd_shuffle8!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 15))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
static_assert_imm4!(N);
match N & 0b1111 {
0 => simd_shuffle16!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
1 => simd_shuffle16!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]),
2 => simd_shuffle16!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]),
3 => simd_shuffle16!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]),
4 => simd_shuffle16!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
5 => simd_shuffle16!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]),
6 => simd_shuffle16!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]),
7 => simd_shuffle16!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]),
8 => simd_shuffle16!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]),
9 => simd_shuffle16!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]),
10 => simd_shuffle16!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]),
11 => simd_shuffle16!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]),
12 => simd_shuffle16!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]),
13 => simd_shuffle16!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]),
14 => simd_shuffle16!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]),
15 => simd_shuffle16!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
static_assert_imm2!(N);
match N & 0b11 {
0 => simd_shuffle4!(a, b, [0, 1, 2, 3]),
1 => simd_shuffle4!(a, b, [1, 2, 3, 4]),
2 => simd_shuffle4!(a, b, [2, 3, 4, 5]),
3 => simd_shuffle4!(a, b, [3, 4, 5, 6]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
static_assert_imm3!(N);
match N & 0b111 {
0 => simd_shuffle8!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
1 => simd_shuffle8!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
2 => simd_shuffle8!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
3 => simd_shuffle8!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
4 => simd_shuffle8!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
5 => simd_shuffle8!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
6 => simd_shuffle8!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
7 => simd_shuffle8!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
static_assert_imm1!(N);
match N & 0b1 {
0 => simd_shuffle2!(a, b, [0, 1]),
1 => simd_shuffle2!(a, b, [1, 2]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
static_assert_imm2!(N);
match N & 0b11 {
0 => simd_shuffle4!(a, b, [0, 1, 2, 3]),
1 => simd_shuffle4!(a, b, [1, 2, 3, 4]),
2 => simd_shuffle4!(a, b, [2, 3, 4, 5]),
3 => simd_shuffle4!(a, b, [3, 4, 5, 6]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
static_assert_imm3!(N);
match N & 0b111 {
0 => simd_shuffle8!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
1 => simd_shuffle8!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
2 => simd_shuffle8!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
3 => simd_shuffle8!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
4 => simd_shuffle8!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
5 => simd_shuffle8!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
6 => simd_shuffle8!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
7 => simd_shuffle8!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 15))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
static_assert_imm4!(N);
match N & 0b1111 {
0 => simd_shuffle16!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
1 => simd_shuffle16!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]),
2 => simd_shuffle16!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]),
3 => simd_shuffle16!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]),
4 => simd_shuffle16!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
5 => simd_shuffle16!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]),
6 => simd_shuffle16!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]),
7 => simd_shuffle16!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]),
8 => simd_shuffle16!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]),
9 => simd_shuffle16!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]),
10 => simd_shuffle16!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]),
11 => simd_shuffle16!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]),
12 => simd_shuffle16!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]),
13 => simd_shuffle16!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]),
14 => simd_shuffle16!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]),
15 => simd_shuffle16!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
static_assert_imm2!(N);
match N & 0b11 {
0 => simd_shuffle4!(a, b, [0, 1, 2, 3]),
1 => simd_shuffle4!(a, b, [1, 2, 3, 4]),
2 => simd_shuffle4!(a, b, [2, 3, 4, 5]),
3 => simd_shuffle4!(a, b, [3, 4, 5, 6]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
static_assert_imm3!(N);
match N & 0b111 {
0 => simd_shuffle8!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
1 => simd_shuffle8!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
2 => simd_shuffle8!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
3 => simd_shuffle8!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
4 => simd_shuffle8!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
5 => simd_shuffle8!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
6 => simd_shuffle8!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
7 => simd_shuffle8!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s64)
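///
/// With two 64-bit lanes, `N = 1` pairs the high lane of `a` with the
/// low lane of `b`. On ARMv7 a Q register aliases a pair of D
/// registers, so this lowers to plain `vmov`s of D registers rather
/// than a byte-wise `vext` (hence the `vmov` assertion).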
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
static_assert_imm1!(N);
match N & 0b1 {
0 => simd_shuffle2!(a, b, [0, 1]),
1 => simd_shuffle2!(a, b, [1, 2]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
static_assert_imm1!(N);
match N & 0b1 {
0 => simd_shuffle2!(a, b, [0, 1]),
1 => simd_shuffle2!(a, b, [1, 2]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_f32<const N: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
static_assert_imm1!(N);
match N & 0b1 {
0 => simd_shuffle2!(a, b, [0, 1]),
1 => simd_shuffle2!(a, b, [1, 2]),
_ => unreachable_unchecked(),
}
}
/// Extract vector from pair of vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_f32<const N: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
static_assert_imm2!(N);
match N & 0b11 {
0 => simd_shuffle4!(a, b, [0, 1, 2, 3]),
1 => simd_shuffle4!(a, b, [1, 2, 3, 4]),
2 => simd_shuffle4!(a, b, [2, 3, 4, 5]),
3 => simd_shuffle4!(a, b, [3, 4, 5, 6]),
_ => unreachable_unchecked(),
}
}
/// Multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s8)
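///
/// Computes `a + b * c` lane-wise. The operation is written as separate
/// `simd_mul` and `simd_add` calls, which the backend pattern-matches
/// into a single multiply-accumulate (`vmla`/`mla`) instruction.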
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
simd_add(a, simd_mul(b, c))
}
/// Multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
simd_add(a, simd_mul(b, c))
}
/// Multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
simd_add(a, simd_mul(b, c))
}
/// Multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
simd_add(a, simd_mul(b, c))
}
/// Multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
simd_add(a, simd_mul(b, c))
}
/// Multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
simd_add(a, simd_mul(b, c))
}
/// Multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
simd_add(a, simd_mul(b, c))
}
/// Multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
simd_add(a, simd_mul(b, c))
}
/// Multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t {
simd_add(a, simd_mul(b, c))
}
/// Multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
simd_add(a, simd_mul(b, c))
}
/// Multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t {
simd_add(a, simd_mul(b, c))
}
/// Multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
simd_add(a, simd_mul(b, c))
}
/// Floating-point multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f32)
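///
/// Computes `a + b * c` lane-wise without fusing: the intermediate
/// product is rounded, matching the non-fused `VMLA.F32` of ARMv7.
/// AArch64 SIMD only offers the fused `fmla`, so the compiler emits a
/// separate `fmul` and `fadd` there (hence the `fmul` assertion).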
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
simd_add(a, simd_mul(b, c))
}
/// Floating-point multiply-add to accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
simd_add(a, simd_mul(b, c))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s16)
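///
/// The scalar `c` is broadcast to all lanes with `vdup_n_s16` and then
/// multiply-accumulated, i.e. each output lane is `a[i] + b[i] * c`.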
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t {
vmla_s16(a, b, vdup_n_s16(c))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t {
vmlaq_s16(a, b, vdupq_n_s16(c))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t {
vmla_s32(a, b, vdup_n_s32(c))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t {
vmlaq_s32(a, b, vdupq_n_s32(c))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t {
vmla_u16(a, b, vdup_n_u16(c))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t {
vmlaq_u16(a, b, vdupq_n_u16(c))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t {
vmla_u32(a, b, vdup_n_u32(c))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t {
vmlaq_u32(a, b, vdupq_n_u32(c))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t {
vmla_f32(a, b, vdup_n_f32(c))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t {
vmlaq_f32(a, b, vdupq_n_f32(c))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s16)
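///
/// Lane `LANE` of `c` is broadcast to all four lanes and then
/// multiply-accumulated: each output lane is `a[i] + b[i] * c[LANE]`.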
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
static_assert_imm2!(LANE);
vmla_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
static_assert_imm3!(LANE);
vmla_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
static_assert_imm2!(LANE);
vmlaq_s16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
static_assert_imm3!(LANE);
vmlaq_s16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
static_assert_imm1!(LANE);
vmla_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
static_assert_imm2!(LANE);
vmla_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
static_assert_imm1!(LANE);
vmlaq_s32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
static_assert_imm2!(LANE);
vmlaq_s32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t {
static_assert_imm2!(LANE);
vmla_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t {
static_assert_imm3!(LANE);
vmla_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t {
static_assert_imm2!(LANE);
vmlaq_u16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
static_assert_imm3!(LANE);
vmlaq_u16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t {
static_assert_imm1!(LANE);
vmla_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t {
static_assert_imm2!(LANE);
vmla_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t {
static_assert_imm1!(LANE);
vmlaq_u32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
static_assert_imm2!(LANE);
vmlaq_u32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
static_assert_imm1!(LANE);
vmla_f32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
static_assert_imm2!(LANE);
vmla_f32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
static_assert_imm1!(LANE);
vmlaq_f32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
static_assert_imm2!(LANE);
vmlaq_f32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Signed multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s8)
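///
/// Each pair of 8-bit lanes of `b` and `c` is multiplied to a 16-bit
/// product (`vmull_s8`), which is then added to the corresponding
/// 16-bit lane of `a`.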
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t {
simd_add(a, vmull_s8(b, c))
}
/// Signed multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
simd_add(a, vmull_s16(b, c))
}
/// Signed multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
simd_add(a, vmull_s32(b, c))
}
/// Unsigned multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t {
simd_add(a, vmull_u8(b, c))
}
/// Unsigned multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t {
simd_add(a, vmull_u16(b, c))
}
/// Unsigned multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t {
simd_add(a, vmull_u32(b, c))
}
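// Illustrative sketch (not emitted by the generator): because `vmlal_u8` widens
// each u8 product to u16 before accumulating, even 255 * 255 = 65025 is
// representable without overflow. Gated to aarch64 (NEON baseline); the test
// name and `transmute` setup are assumptions of this example.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn vmlal_u8_no_overflow_example() {
    unsafe {
        let a: uint16x8_t = core::mem::transmute([0u16; 8]);
        let b: uint8x8_t = core::mem::transmute([255u8; 8]);
        let c: uint8x8_t = core::mem::transmute([255u8; 8]);
        let r: [u16; 8] = core::mem::transmute(vmlal_u8(a, b, c));
        assert_eq!(r, [65025u16; 8]);
    }
}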
/// Vector widening multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t {
vmlal_s16(a, b, vdup_n_s16(c))
}
/// Vector widening multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t {
vmlal_s32(a, b, vdup_n_s32(c))
}
/// Vector widening multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t {
vmlal_u16(a, b, vdup_n_u16(c))
}
/// Vector widening multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t {
vmlal_u32(a, b, vdup_n_u32(c))
}
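// Illustrative sketch (not emitted by the generator): the `_n_` variants are
// shorthand for duplicating one scalar across every lane, so `vmlal_n_s16(a, b, c)`
// matches `vmlal_s16(a, b, vdup_n_s16(c))` exactly, as its body above shows.
// Test name and lane setup are assumptions of this example; gated to aarch64.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn vmlal_n_s16_equivalence_example() {
    unsafe {
        let a: int32x4_t = core::mem::transmute([1i32, 1, 1, 1]);
        let b: int16x4_t = core::mem::transmute([1i16, 2, 3, 4]);
        let r: [i32; 4] = core::mem::transmute(vmlal_n_s16(a, b, 3));
        let e: [i32; 4] = core::mem::transmute(vmlal_s16(a, b, vdup_n_s16(3)));
        assert_eq!(r, [4, 7, 10, 13]);
        assert_eq!(r, e);
    }
}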
/// Vector widening multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
static_assert_imm2!(LANE);
vmlal_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector widening multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
static_assert_imm3!(LANE);
vmlal_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector widening multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
static_assert_imm1!(LANE);
vmlal_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector widening multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
static_assert_imm2!(LANE);
vmlal_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector widening multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t {
static_assert_imm2!(LANE);
vmlal_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector widening multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t {
static_assert_imm3!(LANE);
vmlal_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector widening multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t {
static_assert_imm1!(LANE);
vmlal_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector widening multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t {
static_assert_imm2!(LANE);
vmlal_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
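// Illustrative sketch (not emitted by the generator): the `_lane_` variants
// broadcast one compile-time-selected lane of `c` before the widening
// multiply-accumulate, which is why LANE is range-checked with
// `static_assert_imm2!` (0..=3 for a 4-lane vector). Test name and values are
// assumptions of this example; gated to aarch64.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn vmlal_lane_s16_broadcast_example() {
    unsafe {
        let a: int32x4_t = core::mem::transmute([0i32; 4]);
        let b: int16x4_t = core::mem::transmute([1i16, 2, 3, 4]);
        let c: int16x4_t = core::mem::transmute([10i16, 20, 30, 40]);
        // LANE = 2 selects c[2] = 30 for every lane of the product.
        let r: [i32; 4] = core::mem::transmute(vmlal_lane_s16::<2>(a, b, c));
        assert_eq!(r, [30, 60, 90, 120]);
    }
}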
/// Multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
simd_sub(a, simd_mul(b, c))
}
/// Multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
simd_sub(a, simd_mul(b, c))
}
/// Multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
simd_sub(a, simd_mul(b, c))
}
/// Multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
simd_sub(a, simd_mul(b, c))
}
/// Multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
simd_sub(a, simd_mul(b, c))
}
/// Multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
simd_sub(a, simd_mul(b, c))
}
/// Multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
simd_sub(a, simd_mul(b, c))
}
/// Multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
simd_sub(a, simd_mul(b, c))
}
/// Multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t {
simd_sub(a, simd_mul(b, c))
}
/// Multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
simd_sub(a, simd_mul(b, c))
}
/// Multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t {
simd_sub(a, simd_mul(b, c))
}
/// Multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
simd_sub(a, simd_mul(b, c))
}
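// Illustrative sketch (not emitted by the generator): the integer `vmls_*`
// family computes `a - b * c` lane-wise at the element width, with ordinary
// modular (wrapping) integer arithmetic. Test name and values are assumptions
// of this example; gated to aarch64.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn vmls_s16_example() {
    unsafe {
        let a: int16x4_t = core::mem::transmute([100i16; 4]);
        let b: int16x4_t = core::mem::transmute([1i16, 2, 3, 4]);
        let c: int16x4_t = core::mem::transmute([10i16; 4]);
        let r: [i16; 4] = core::mem::transmute(vmls_s16(a, b, c));
        assert_eq!(r, [90, 80, 70, 60]);
    }
}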
/// Floating-point multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
simd_sub(a, simd_mul(b, c))
}
/// Floating-point multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
simd_sub(a, simd_mul(b, c))
}
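// Illustrative sketch (not emitted by the generator): on AArch64 the float
// variants are asserted against `fmul` above, i.e. they lower to a separate
// multiply and subtract rather than a fused `fmls`, so the product is rounded
// before the subtraction. The values below are exact in binary floating point,
// so the assertion holds either way. Test name is an assumption of this example.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn vmls_f32_example() {
    unsafe {
        let a: float32x2_t = core::mem::transmute([10.0f32, 10.0]);
        let b: float32x2_t = core::mem::transmute([2.0f32, 3.0]);
        let c: float32x2_t = core::mem::transmute([0.5f32, 0.5]);
        // Lane-wise: 10.0 - 2.0*0.5 and 10.0 - 3.0*0.5
        let r: [f32; 2] = core::mem::transmute(vmls_f32(a, b, c));
        assert_eq!(r, [9.0, 8.5]);
    }
}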
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t {
vmls_s16(a, b, vdup_n_s16(c))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t {
vmlsq_s16(a, b, vdupq_n_s16(c))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t {
vmls_s32(a, b, vdup_n_s32(c))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t {
vmlsq_s32(a, b, vdupq_n_s32(c))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t {
vmls_u16(a, b, vdup_n_u16(c))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t {
vmlsq_u16(a, b, vdupq_n_u16(c))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t {
vmls_u32(a, b, vdup_n_u32(c))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t {
vmlsq_u32(a, b, vdupq_n_u32(c))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t {
vmls_f32(a, b, vdup_n_f32(c))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t {
vmlsq_f32(a, b, vdupq_n_f32(c))
}
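// Illustrative sketch (not emitted by the generator): as with the accumulate
// forms, `vmls_n_s16(a, b, c)` is the scalar-broadcast shorthand for
// `vmls_s16(a, b, vdup_n_s16(c))`. Test name and values are assumptions of
// this example; gated to aarch64.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn vmls_n_s16_example() {
    unsafe {
        let a: int16x4_t = core::mem::transmute([10i16; 4]);
        let b: int16x4_t = core::mem::transmute([1i16, 2, 3, 4]);
        let r: [i16; 4] = core::mem::transmute(vmls_n_s16(a, b, 2));
        assert_eq!(r, [8, 6, 4, 2]);
    }
}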
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
static_assert_imm2!(LANE);
vmls_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
static_assert_imm3!(LANE);
vmls_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
static_assert_imm2!(LANE);
vmlsq_s16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
static_assert_imm3!(LANE);
vmlsq_s16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
static_assert_imm1!(LANE);
vmls_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
static_assert_imm2!(LANE);
vmls_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
static_assert_imm1!(LANE);
vmlsq_s32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
static_assert_imm2!(LANE);
vmlsq_s32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t {
static_assert_imm2!(LANE);
vmls_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t {
static_assert_imm3!(LANE);
vmls_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t {
static_assert_imm2!(LANE);
vmlsq_u16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
static_assert_imm3!(LANE);
vmlsq_u16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t {
static_assert_imm1!(LANE);
vmls_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t {
static_assert_imm2!(LANE);
vmls_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t {
static_assert_imm1!(LANE);
vmlsq_u32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
static_assert_imm2!(LANE);
vmlsq_u32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
static_assert_imm1!(LANE);
vmls_f32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
static_assert_imm2!(LANE);
vmls_f32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
static_assert_imm1!(LANE);
vmlsq_f32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
static_assert_imm2!(LANE);
vmlsq_f32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
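// Illustrative sketch (not emitted by the generator): lane selection for the
// multiply-subtract family mirrors the accumulate family; `LANE` indexes `c`
// at compile time and the selected element is broadcast before computing
// `a - b * c[LANE]`. Test name and values are assumptions of this example;
// gated to aarch64.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn vmls_lane_s16_example() {
    unsafe {
        let a: int16x4_t = core::mem::transmute([100i16; 4]);
        let b: int16x4_t = core::mem::transmute([1i16, 2, 3, 4]);
        let c: int16x4_t = core::mem::transmute([1i16, 2, 3, 4]);
        // LANE = 3 selects c[3] = 4, so each lane computes 100 - b[i] * 4.
        let r: [i16; 4] = core::mem::transmute(vmls_lane_s16::<3>(a, b, c));
        assert_eq!(r, [96, 92, 88, 84]);
    }
}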
/// Signed multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t {
simd_sub(a, vmull_s8(b, c))
}
/// Signed multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
simd_sub(a, vmull_s16(b, c))
}
/// Signed multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
simd_sub(a, vmull_s32(b, c))
}
/// Unsigned multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t {
simd_sub(a, vmull_u8(b, c))
}
/// Unsigned multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t {
simd_sub(a, vmull_u16(b, c))
}
/// Unsigned multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t {
simd_sub(a, vmull_u32(b, c))
}
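// Illustrative sketch (not emitted by the generator): `vmlsl_*` is the widening
// multiply-subtract, `a - (b as wide) * (c as wide)`. The product 100 * 100 =
// 10000 does not fit in i8 but is formed at i16 width before the subtraction.
// Test name and values are assumptions of this example; gated to aarch64.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn vmlsl_s8_widening_example() {
    unsafe {
        let a: int16x8_t = core::mem::transmute([20000i16; 8]);
        let b: int8x8_t = core::mem::transmute([100i8; 8]);
        let c: int8x8_t = core::mem::transmute([100i8; 8]);
        let r: [i16; 8] = core::mem::transmute(vmlsl_s8(a, b, c));
        assert_eq!(r, [10000i16; 8]);
    }
}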
/// Vector widening multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t {
vmlsl_s16(a, b, vdup_n_s16(c))
}
/// Vector widening multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t {
vmlsl_s32(a, b, vdup_n_s32(c))
}
/// Vector widening multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t {
vmlsl_u16(a, b, vdup_n_u16(c))
}
/// Vector widening multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t {
vmlsl_u32(a, b, vdup_n_u32(c))
}
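// Illustrative sketch (not emitted by the generator): the scalar form widens
// in the same way, so `vmlsl_n_u16(a, b, c)` computes
// `a[i] - (b[i] as u32) * (c as u32)` per lane. Test name and values are
// assumptions of this example; gated to aarch64.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn vmlsl_n_u16_example() {
    unsafe {
        let a: uint32x4_t = core::mem::transmute([100_000u32; 4]);
        let b: uint16x4_t = core::mem::transmute([1000u16, 2000, 3000, 4000]);
        let r: [u32; 4] = core::mem::transmute(vmlsl_n_u16(a, b, 10));
        assert_eq!(r, [90_000, 80_000, 70_000, 60_000]);
    }
}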
/// Vector widening multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
static_assert_imm2!(LANE);
vmlsl_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector widening multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
static_assert_imm3!(LANE);
vmlsl_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector widening multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
static_assert_imm1!(LANE);
vmlsl_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector widening multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
static_assert_imm2!(LANE);
vmlsl_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector widening multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t {
static_assert_imm2!(LANE);
vmlsl_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector widening multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t {
static_assert_imm3!(LANE);
vmlsl_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector widening multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t {
static_assert_imm1!(LANE);
vmlsl_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector widening multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t {
static_assert_imm2!(LANE);
vmlsl_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
}
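// Illustrative sketch (not emitted by the generator): for the 32-to-64-bit
// lane form, `static_assert_imm1!` restricts LANE to 0 or 1 of the 2-lane `c`.
// Test name and values are assumptions of this example; gated to aarch64.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn vmlsl_lane_s32_example() {
    unsafe {
        let a: int64x2_t = core::mem::transmute([0i64, 0]);
        let b: int32x2_t = core::mem::transmute([1_000_000i32, 2_000_000]);
        let c: int32x2_t = core::mem::transmute([3i32, 7000]);
        // LANE = 1 selects c[1] = 7000; products near 7e9 and 1.4e10 only fit
        // because the multiply is performed at i64 width.
        let r: [i64; 2] = core::mem::transmute(vmlsl_lane_s32::<1>(a, b, c));
        assert_eq!(r, [-7_000_000_000, -14_000_000_000]);
    }
}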
/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t {
simd_neg(a)
}
/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t {
simd_neg(a)
}
/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t {
simd_neg(a)
}
/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t {
simd_neg(a)
}
/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t {
simd_neg(a)
}
/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t {
simd_neg(a)
}
/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fneg))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t {
simd_neg(a)
}
/// Negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fneg))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t {
simd_neg(a)
}
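// Illustrative sketch (not emitted by the generator): plain `vneg_*` is a
// two's-complement (wrapping) negation, so `i8::MIN` maps to itself — the
// contrast with the saturating `vqneg_*` family below. Test name and values
// are assumptions of this example; gated to aarch64.
#[cfg(all(test, target_arch = "aarch64"))]
#[test]
fn vneg_s8_wrapping_example() {
    unsafe {
        let a: int8x8_t = core::mem::transmute([i8::MIN, 1, -2, 3, -4, 5, -6, 7]);
        let r: [i8; 8] = core::mem::transmute(vneg_s8(a));
        assert_eq!(r, [i8::MIN, -1, 2, -3, 4, -5, 6, -7]);
    }
}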
/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqneg.v8i8")]
fn vqneg_s8_(a: int8x8_t) -> int8x8_t;
}
vqneg_s8_(a)
}
/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqneg.v16i8")]
fn vqnegq_s8_(a: int8x16_t) -> int8x16_t;
}
vqnegq_s8_(a)
}
/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqneg.v4i16")]
fn vqneg_s16_(a: int16x4_t) -> int16x4_t;
}
vqneg_s16_(a)
}
/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqneg.v8i16")]
fn vqnegq_s16_(a: int16x8_t) -> int16x8_t;
}
vqnegq_s16_(a)
}
/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqneg.v2i32")]
fn vqneg_s32_(a: int32x2_t) -> int32x2_t;
}
vqneg_s32_(a)
}
/// Signed saturating negate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqneg.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqneg.v4i32")]
fn vqnegq_s32_(a: int32x4_t) -> int32x4_t;
}
vqnegq_s32_(a)
}
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u8)
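///
/// # Examples
///
/// A minimal sketch, assuming an AArch64 target: each lane computes
/// `a - b`, clamping at zero rather than wrapping.
///
/// ```
/// #[cfg(target_arch = "aarch64")]
/// {
///     use core::arch::aarch64::*;
///     unsafe {
///         let a: uint8x8_t = core::mem::transmute([10u8, 0, 255, 5, 5, 5, 5, 5]);
///         let b: uint8x8_t = core::mem::transmute([20u8, 1, 1, 5, 4, 3, 2, 1]);
///         let r: [u8; 8] = core::mem::transmute(vqsub_u8(a, b));
///         // 10 - 20 and 0 - 1 both clamp to 0 instead of wrapping.
///         assert_eq!(r, [0, 0, 254, 0, 1, 2, 3, 4]);
///     }
/// }
/// ```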
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.v8i8")]
fn vqsub_u8_(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t;
}
vqsub_u8_(a, b)
}
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.v16i8")]
fn vqsubq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
}
vqsubq_u8_(a, b)
}
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.v4i16")]
fn vqsub_u16_(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t;
}
vqsub_u16_(a, b)
}
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.v8i16")]
fn vqsubq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
}
vqsubq_u16_(a, b)
}
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.v2i32")]
fn vqsub_u32_(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t;
}
vqsub_u32_(a, b)
}
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.v4i32")]
fn vqsubq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
}
vqsubq_u32_(a, b)
}
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v1i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.v1i64")]
fn vqsub_u64_(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t;
}
vqsub_u64_(a, b)
}
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.usub.sat.v2i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqsub.v2i64")]
fn vqsubq_u64_(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
}
vqsubq_u64_(a, b)
}
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s8)
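///
/// # Examples
///
/// A minimal sketch, assuming an AArch64 target: results clamp to the
/// `i8` range at both ends.
///
/// ```
/// #[cfg(target_arch = "aarch64")]
/// {
///     use core::arch::aarch64::*;
///     unsafe {
///         let a: int8x8_t = core::mem::transmute([i8::MIN, 100, 0, 0, 0, 0, 0, 0]);
///         let b: int8x8_t = core::mem::transmute([1i8, -100, 0, 0, 0, 0, 0, 0]);
///         let r: [i8; 8] = core::mem::transmute(vqsub_s8(a, b));
///         // -128 - 1 clamps to -128; 100 - (-100) clamps to 127.
///         assert_eq!(r, [i8::MIN, i8::MAX, 0, 0, 0, 0, 0, 0]);
///     }
/// }
/// ```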
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.v8i8")]
fn vqsub_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t;
}
vqsub_s8_(a, b)
}
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.v16i8")]
fn vqsubq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t;
}
vqsubq_s8_(a, b)
}
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.v4i16")]
fn vqsub_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t;
}
vqsub_s16_(a, b)
}
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.v8i16")]
fn vqsubq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
}
vqsubq_s16_(a, b)
}
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.v2i32")]
fn vqsub_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t;
}
vqsub_s32_(a, b)
}
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.v4i32")]
fn vqsubq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
}
vqsubq_s32_(a, b)
}
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v1i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.v1i64")]
fn vqsub_s64_(a: int64x1_t, b: int64x1_t) -> int64x1_t;
}
vqsub_s64_(a, b)
}
/// Saturating subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.ssub.sat.v2i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqsub.v2i64")]
fn vqsubq_s64_(a: int64x2_t, b: int64x2_t) -> int64x2_t;
}
vqsubq_s64_(a, b)
}
/// Halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u8)
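///
/// # Examples
///
/// A minimal sketch, assuming an AArch64 target: each lane computes
/// `(a + b) >> 1` with the sum formed at full precision, so it cannot
/// overflow; the halving truncates (rounds down).
///
/// ```
/// #[cfg(target_arch = "aarch64")]
/// {
///     use core::arch::aarch64::*;
///     unsafe {
///         let a: uint8x8_t = core::mem::transmute([200u8, 1, 0, 0, 0, 0, 0, 0]);
///         let b: uint8x8_t = core::mem::transmute([100u8, 2, 0, 0, 0, 0, 0, 0]);
///         let r: [u8; 8] = core::mem::transmute(vhadd_u8(a, b));
///         // (200 + 100) / 2 = 150 without overflowing; (1 + 2) / 2 truncates to 1.
///         assert_eq!(r, [150, 1, 0, 0, 0, 0, 0, 0]);
///     }
/// }
/// ```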
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhadd.v8i8")]
fn vhadd_u8_(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t;
}
vhadd_u8_(a, b)
}
/// Halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhadd.v16i8")]
fn vhaddq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
}
vhaddq_u8_(a, b)
}
/// Halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhadd.v4i16")]
fn vhadd_u16_(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t;
}
vhadd_u16_(a, b)
}
/// Halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhadd.v8i16")]
fn vhaddq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
}
vhaddq_u16_(a, b)
}
/// Halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhadd.v2i32")]
fn vhadd_u32_(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t;
}
vhadd_u32_(a, b)
}
/// Halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhaddu.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhadd.v4i32")]
fn vhaddq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
}
vhaddq_u32_(a, b)
}
/// Halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s8)
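///
/// # Examples
///
/// A minimal sketch, assuming an AArch64 target; the signed form shifts
/// the full-precision sum right arithmetically, i.e. it rounds toward
/// negative infinity.
///
/// ```
/// #[cfg(target_arch = "aarch64")]
/// {
///     use core::arch::aarch64::*;
///     unsafe {
///         let a: int8x8_t = core::mem::transmute([-3i8, 120, 0, 0, 0, 0, 0, 0]);
///         let b: int8x8_t = core::mem::transmute([2i8, 120, 0, 0, 0, 0, 0, 0]);
///         let r: [i8; 8] = core::mem::transmute(vhadd_s8(a, b));
///         // (-3 + 2) >> 1 = -1, not 0; (120 + 120) / 2 = 120 without overflow.
///         assert_eq!(r, [-1, 120, 0, 0, 0, 0, 0, 0]);
///     }
/// }
/// ```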
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shadd.v8i8")]
fn vhadd_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t;
}
vhadd_s8_(a, b)
}
/// Halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shadd.v16i8")]
fn vhaddq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t;
}
vhaddq_s8_(a, b)
}
/// Halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shadd.v4i16")]
fn vhadd_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t;
}
vhadd_s16_(a, b)
}
/// Halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shadd.v8i16")]
fn vhaddq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
}
vhaddq_s16_(a, b)
}
/// Halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shadd.v2i32")]
fn vhadd_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t;
}
vhadd_s32_(a, b)
}
/// Halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhadds.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shadd.v4i32")]
fn vhaddq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
}
vhaddq_s32_(a, b)
}
/// Rounding halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u8)
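///
/// # Examples
///
/// A minimal sketch, assuming an AArch64 target: the rounding variant
/// computes `(a + b + 1) >> 1`, so halves round up instead of truncating.
///
/// ```
/// #[cfg(target_arch = "aarch64")]
/// {
///     use core::arch::aarch64::*;
///     unsafe {
///         let a: uint8x8_t = core::mem::transmute([1u8, 200, 0, 0, 0, 0, 0, 0]);
///         let b: uint8x8_t = core::mem::transmute([2u8, 101, 0, 0, 0, 0, 0, 0]);
///         let r: [u8; 8] = core::mem::transmute(vrhadd_u8(a, b));
///         // (1 + 2 + 1) >> 1 = 2, where vhadd_u8 would give 1.
///         assert_eq!(r, [2, 151, 0, 0, 0, 0, 0, 0]);
///     }
/// }
/// ```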
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urhadd.v8i8")]
fn vrhadd_u8_(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t;
}
vrhadd_u8_(a, b)
}
/// Rounding halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urhadd.v16i8")]
fn vrhaddq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
}
vrhaddq_u8_(a, b)
}
/// Rounding halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urhadd.v4i16")]
fn vrhadd_u16_(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t;
}
vrhadd_u16_(a, b)
}
/// Rounding halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urhadd.v8i16")]
fn vrhaddq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
}
vrhaddq_u16_(a, b)
}
/// Rounding halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urhadd.v2i32")]
fn vrhadd_u32_(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t;
}
vrhadd_u32_(a, b)
}
/// Rounding halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhaddu.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urhadd.v4i32")]
fn vrhaddq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
}
vrhaddq_u32_(a, b)
}
/// Rounding halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s8)
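///
/// # Examples
///
/// A minimal sketch, assuming an AArch64 target; compare with `vhadd_s8`,
/// which rounds the same input down to -1.
///
/// ```
/// #[cfg(target_arch = "aarch64")]
/// {
///     use core::arch::aarch64::*;
///     unsafe {
///         let a: int8x8_t = core::mem::transmute([-3i8, 0, 0, 0, 0, 0, 0, 0]);
///         let b: int8x8_t = core::mem::transmute([2i8, 0, 0, 0, 0, 0, 0, 0]);
///         let r: [i8; 8] = core::mem::transmute(vrhadd_s8(a, b));
///         // (-3 + 2 + 1) >> 1 = 0.
///         assert_eq!(r, [0i8; 8]);
///     }
/// }
/// ```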
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srhadd.v8i8")]
fn vrhadd_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t;
}
vrhadd_s8_(a, b)
}
/// Rounding halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srhadd.v16i8")]
fn vrhaddq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t;
}
vrhaddq_s8_(a, b)
}
/// Rounding halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srhadd.v4i16")]
fn vrhadd_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t;
}
vrhadd_s16_(a, b)
}
/// Rounding halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srhadd.v8i16")]
fn vrhaddq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
}
vrhaddq_s16_(a, b)
}
/// Rounding halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srhadd.v2i32")]
fn vrhadd_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t;
}
vrhadd_s32_(a, b)
}
/// Rounding halving add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrhadds.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srhadd.v4i32")]
fn vrhaddq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
}
vrhaddq_s32_(a, b)
}
/// Floating-point round to integral, to nearest with ties to even
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f32)
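///
/// # Examples
///
/// A minimal sketch, assuming an AArch64 target: exact halves round to the
/// even neighbour, matching IEEE 754 round-to-nearest-even.
///
/// ```
/// #[cfg(target_arch = "aarch64")]
/// {
///     use core::arch::aarch64::*;
///     unsafe {
///         let a: float32x2_t = core::mem::transmute([0.5f32, 1.5]);
///         let r: [f32; 2] = core::mem::transmute(vrndn_f32(a));
///         // 0.5 rounds down to 0.0 and 1.5 rounds up to 2.0: ties to even.
///         assert_eq!(r, [0.0, 2.0]);
///     }
/// }
/// ```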
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frintn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frintn.v2f32")]
fn vrndn_f32_(a: float32x2_t) -> float32x2_t;
}
vrndn_f32_(a)
}
/// Floating-point round to integral, to nearest with ties to even
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frintn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrintn.v4f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frintn.v4f32")]
fn vrndnq_f32_(a: float32x4_t) -> float32x4_t;
}
vrndnq_f32_(a)
}
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u8)
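///
/// # Examples
///
/// A minimal sketch, assuming an AArch64 target: sums clamp to `u8::MAX`
/// instead of wrapping.
///
/// ```
/// #[cfg(target_arch = "aarch64")]
/// {
///     use core::arch::aarch64::*;
///     unsafe {
///         let a: uint8x8_t = core::mem::transmute([200u8, 1, 0, 0, 0, 0, 0, 0]);
///         let b: uint8x8_t = core::mem::transmute([100u8, 2, 0, 0, 0, 0, 0, 0]);
///         let r: [u8; 8] = core::mem::transmute(vqadd_u8(a, b));
///         // 200 + 100 clamps to 255 rather than wrapping to 44.
///         assert_eq!(r, [u8::MAX, 3, 0, 0, 0, 0, 0, 0]);
///     }
/// }
/// ```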
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.v8i8")]
fn vqadd_u8_(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t;
}
vqadd_u8_(a, b)
}
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.v16i8")]
fn vqaddq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
}
vqaddq_u8_(a, b)
}
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.v4i16")]
fn vqadd_u16_(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t;
}
vqadd_u16_(a, b)
}
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.v8i16")]
fn vqaddq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
}
vqaddq_u16_(a, b)
}
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.v2i32")]
fn vqadd_u32_(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t;
}
vqadd_u32_(a, b)
}
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.v4i32")]
fn vqaddq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
}
vqaddq_u32_(a, b)
}
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v1i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.v1i64")]
fn vqadd_u64_(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t;
}
vqadd_u64_(a, b)
}
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.uadd.sat.v2i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqadd.v2i64")]
fn vqaddq_u64_(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
}
vqaddq_u64_(a, b)
}
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s8)
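///
/// # Examples
///
/// A minimal sketch, assuming an AArch64 target: sums clamp at both ends
/// of the `i8` range.
///
/// ```
/// #[cfg(target_arch = "aarch64")]
/// {
///     use core::arch::aarch64::*;
///     unsafe {
///         let a: int8x8_t = core::mem::transmute([i8::MAX, i8::MIN, 0, 0, 0, 0, 0, 0]);
///         let b: int8x8_t = core::mem::transmute([1i8, -1, 0, 0, 0, 0, 0, 0]);
///         let r: [i8; 8] = core::mem::transmute(vqadd_s8(a, b));
///         // 127 + 1 clamps to 127; -128 + (-1) clamps to -128.
///         assert_eq!(r, [i8::MAX, i8::MIN, 0, 0, 0, 0, 0, 0]);
///     }
/// }
/// ```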
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.v8i8")]
fn vqadd_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t;
}
vqadd_s8_(a, b)
}
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.v16i8")]
fn vqaddq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t;
}
vqaddq_s8_(a, b)
}
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.v4i16")]
fn vqadd_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t;
}
vqadd_s16_(a, b)
}
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.v8i16")]
fn vqaddq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
}
vqaddq_s16_(a, b)
}
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.v2i32")]
fn vqadd_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t;
}
vqadd_s32_(a, b)
}
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.v4i32")]
fn vqaddq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
}
vqaddq_s32_(a, b)
}
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v1i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.v1i64")]
fn vqadd_s64_(a: int64x1_t, b: int64x1_t) -> int64x1_t;
}
vqadd_s64_(a, b)
}
/// Saturating add
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.sadd.sat.v2i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqadd.v2i64")]
fn vqaddq_s64_(a: int64x2_t, b: int64x2_t) -> int64x2_t;
}
vqaddq_s64_(a, b)
}
/// Load multiple single-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x2)
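///
/// # Examples
///
/// A minimal sketch, assuming an AArch64 target: sixteen contiguous bytes
/// are loaded, with the first eight in field `.0` and the next eight in
/// field `.1` of the returned pair.
///
/// ```
/// #[cfg(target_arch = "aarch64")]
/// {
///     use core::arch::aarch64::*;
///     let data: [i8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
///     // SAFETY: `data` is valid for reads of 16 bytes.
///     unsafe {
///         let r: int8x8x2_t = vld1_s8_x2(data.as_ptr());
///         let lo: [i8; 8] = core::mem::transmute(r.0);
///         let hi: [i8; 8] = core::mem::transmute(r.1);
///         assert_eq!(lo, [0, 1, 2, 3, 4, 5, 6, 7]);
///         assert_eq!(hi, [8, 9, 10, 11, 12, 13, 14, 15]);
///     }
/// }
/// ```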
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i8.p0i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v8i8.p0i8")]
fn vld1_s8_x2_(a: *const i8) -> int8x8x2_t;
}
vld1_s8_x2_(a)
}
/// Load multiple single-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i16.p0i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v4i16.p0i16")]
fn vld1_s16_x2_(a: *const i16) -> int16x4x2_t;
}
vld1_s16_x2_(a)
}
/// Load multiple single-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i32.p0i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v2i32.p0i32")]
fn vld1_s32_x2_(a: *const i32) -> int32x2x2_t;
}
vld1_s32_x2_(a)
}
/// Load multiple single-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v1i64.p0i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v1i64.p0i64")]
fn vld1_s64_x2_(a: *const i64) -> int64x1x2_t;
}
vld1_s64_x2_(a)
}
/// Load multiple single-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v16i8.p0i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v16i8.p0i8")]
fn vld1q_s8_x2_(a: *const i8) -> int8x16x2_t;
}
vld1q_s8_x2_(a)
}
/// Load multiple single-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v8i16.p0i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v8i16.p0i16")]
fn vld1q_s16_x2_(a: *const i16) -> int16x8x2_t;
}
vld1q_s16_x2_(a)
}
/// Load multiple single-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4i32.p0i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v4i32.p0i32")]
fn vld1q_s32_x2_(a: *const i32) -> int32x4x2_t;
}
vld1q_s32_x2_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2i64.p0i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v2i64.p0i64")]
fn vld1q_s64_x2_(a: *const i64) -> int64x2x2_t;
}
vld1q_s64_x2_(a)
}
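// Illustrative sketch (not produced by stdarch-gen, would be overwritten on
// regeneration): how the `vld1_*_x2` intrinsics above are used. Each one loads
// a contiguous run of elements into two vector registers; for `vld1_s32_x2`
// the four i32 values land in order across two `int32x2_t` halves. The helper
// name below is ours, and NEON support is assumed rather than detected.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vld1_s32_x2_usage() {
    let data: [i32; 4] = [10, 20, 30, 40];
    let loaded = vld1_s32_x2(data.as_ptr());
    // The first register holds elements 0..2, the second elements 2..4.
    let lo: [i32; 2] = transmute(loaded.0);
    let hi: [i32; 2] = transmute(loaded.1);
    assert_eq!(lo, [10, 20]);
    assert_eq!(hi, [30, 40]);
}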
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i8.p0i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v8i8.p0i8")]
fn vld1_s8_x3_(a: *const i8) -> int8x8x3_t;
}
vld1_s8_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i16.p0i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v4i16.p0i16")]
fn vld1_s16_x3_(a: *const i16) -> int16x4x3_t;
}
vld1_s16_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i32.p0i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v2i32.p0i32")]
fn vld1_s32_x3_(a: *const i32) -> int32x2x3_t;
}
vld1_s32_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v1i64.p0i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v1i64.p0i64")]
fn vld1_s64_x3_(a: *const i64) -> int64x1x3_t;
}
vld1_s64_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v16i8.p0i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v16i8.p0i8")]
fn vld1q_s8_x3_(a: *const i8) -> int8x16x3_t;
}
vld1q_s8_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v8i16.p0i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v8i16.p0i16")]
fn vld1q_s16_x3_(a: *const i16) -> int16x8x3_t;
}
vld1q_s16_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4i32.p0i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v4i32.p0i32")]
fn vld1q_s32_x3_(a: *const i32) -> int32x4x3_t;
}
vld1q_s32_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2i64.p0i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v2i64.p0i64")]
fn vld1q_s64_x3_(a: *const i64) -> int64x2x3_t;
}
vld1q_s64_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i8.p0i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v8i8.p0i8")]
fn vld1_s8_x4_(a: *const i8) -> int8x8x4_t;
}
vld1_s8_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i16.p0i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v4i16.p0i16")]
fn vld1_s16_x4_(a: *const i16) -> int16x4x4_t;
}
vld1_s16_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i32.p0i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v2i32.p0i32")]
fn vld1_s32_x4_(a: *const i32) -> int32x2x4_t;
}
vld1_s32_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v1i64.p0i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v1i64.p0i64")]
fn vld1_s64_x4_(a: *const i64) -> int64x1x4_t;
}
vld1_s64_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v16i8.p0i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v16i8.p0i8")]
fn vld1q_s8_x4_(a: *const i8) -> int8x16x4_t;
}
vld1q_s8_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v8i16.p0i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v8i16.p0i16")]
fn vld1q_s16_x4_(a: *const i16) -> int16x8x4_t;
}
vld1q_s16_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4i32.p0i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v4i32.p0i32")]
fn vld1q_s32_x4_(a: *const i32) -> int32x4x4_t;
}
vld1q_s32_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2i64.p0i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v2i64.p0i64")]
fn vld1q_s64_x4_(a: *const i64) -> int64x2x4_t;
}
vld1q_s64_x4_(a)
}
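// Illustrative sketch (not produced by stdarch-gen): the `_x3`/`_x4` variants
// follow the same pattern as `_x2`, simply filling three or four registers
// from one contiguous run. Here `vld1_s8_x4` pulls 32 consecutive bytes into
// four `int8x8_t` registers. The helper name is ours; NEON support is assumed.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vld1_s8_x4_usage() {
    let mut data = [0i8; 32];
    let mut i = 0;
    while i < 32 {
        data[i] = i as i8;
        i += 1;
    }
    let loaded = vld1_s8_x4(data.as_ptr());
    // Registers 0..4 hold bytes 0..8, 8..16, 16..24, and 24..32 respectively.
    let first: [i8; 8] = transmute(loaded.0);
    let last: [i8; 8] = transmute(loaded.3);
    assert_eq!(first, [0, 1, 2, 3, 4, 5, 6, 7]);
    assert_eq!(last, [24, 25, 26, 27, 28, 29, 30, 31]);
}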
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t {
transmute(vld1_s8_x2(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t {
transmute(vld1_s16_x2(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t {
transmute(vld1_s32_x2(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t {
transmute(vld1_s64_x2(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t {
transmute(vld1q_s8_x2(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t {
transmute(vld1q_s16_x2(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t {
transmute(vld1q_s32_x2(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t {
transmute(vld1q_s64_x2(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t {
transmute(vld1_s8_x3(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t {
transmute(vld1_s16_x3(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t {
transmute(vld1_s32_x3(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t {
transmute(vld1_s64_x3(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t {
transmute(vld1q_s8_x3(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t {
transmute(vld1q_s16_x3(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t {
transmute(vld1q_s32_x3(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t {
transmute(vld1q_s64_x3(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t {
transmute(vld1_s8_x4(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t {
transmute(vld1_s16_x4(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t {
transmute(vld1_s32_x4(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t {
transmute(vld1_s64_x4(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t {
transmute(vld1q_s8_x4(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t {
transmute(vld1q_s16_x4(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t {
transmute(vld1q_s32_x4(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t {
transmute(vld1q_s64_x4(transmute(a)))
}
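// Illustrative sketch (not produced by stdarch-gen): the unsigned `vld1_u*`
// loads above (and the `p8`/`p16` polynomial ones that follow) are thin
// wrappers that reinterpret, via `transmute`, the result of the corresponding
// signed intrinsic, since the underlying load is bit-pattern identical for
// every element type of the same width. The helper name is ours; NEON support
// is assumed.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vld1_u8_x2_usage() {
    let data: [u8; 16] = [0xFF; 16];
    let loaded = vld1_u8_x2(data.as_ptr());
    // 0xFF stays 0xFF: the wrapper only relabels the element type, the loaded
    // bits are exactly the bytes in memory.
    let lo: [u8; 8] = transmute(loaded.0);
    assert_eq!(lo, [0xFF; 8]);
}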
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t {
transmute(vld1_s8_x2(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t {
transmute(vld1_s8_x3(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t {
transmute(vld1_s8_x4(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t {
transmute(vld1q_s8_x2(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t {
transmute(vld1q_s8_x3(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t {
transmute(vld1q_s8_x4(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t {
transmute(vld1_s16_x2(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t {
transmute(vld1_s16_x3(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t {
transmute(vld1_s16_x4(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t {
transmute(vld1q_s16_x2(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t {
transmute(vld1q_s16_x3(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t {
transmute(vld1q_s16_x4(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x2)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t {
transmute(vld1_s64_x2(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x3)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t {
transmute(vld1_s64_x3(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x4)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t {
transmute(vld1_s64_x4(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x2)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t {
transmute(vld1q_s64_x2(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x3)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t {
transmute(vld1q_s64_x3(transmute(a)))
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x4)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t {
transmute(vld1q_s64_x4(transmute(a)))
}
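// Illustrative sketch (not produced by stdarch-gen): as the attributes above
// show, the `p64` variants are additionally gated on the `aes` feature (the
// 64-bit polynomial type comes with the crypto extensions), so callers must
// enable `neon,aes` rather than `neon` alone. The helper name is ours;
// feature support is assumed, not detected.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,aes")]
unsafe fn example_vld1_p64_x2_usage() {
    let data: [p64; 2] = [1, 2];
    let loaded = vld1_p64_x2(data.as_ptr());
    // Each poly64x1_t register carries a single 64-bit lane.
    let first: u64 = transmute(loaded.0);
    assert_eq!(first, 1);
}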
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v2f32.p0f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v2f32.p0f32")]
fn vld1_f32_x2_(a: *const f32) -> float32x2x2_t;
}
vld1_f32_x2_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x2.v4f32.p0f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x2.v4f32.p0f32")]
fn vld1q_f32_x2_(a: *const f32) -> float32x4x2_t;
}
vld1q_f32_x2_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v2f32.p0f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v2f32.p0f32")]
fn vld1_f32_x3_(a: *const f32) -> float32x2x3_t;
}
vld1_f32_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x3.v4f32.p0f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x3.v4f32.p0f32")]
fn vld1q_f32_x3_(a: *const f32) -> float32x4x3_t;
}
vld1q_f32_x3_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v2f32.p0f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v2f32.p0f32")]
fn vld1_f32_x4_(a: *const f32) -> float32x2x4_t;
}
vld1_f32_x4_(a)
}
/// Load multiple single-element structures to one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld1x4.v4f32.p0f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld1x4.v4f32.p0f32")]
fn vld1q_f32_x4_(a: *const f32) -> float32x4x4_t;
}
vld1q_f32_x4_(a)
}
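// Illustrative sketch (not produced by stdarch-gen): the float loads follow
// the same contiguous pattern, e.g. `vld1q_f32_x2` fills two `float32x4_t`
// registers from eight consecutive f32 values. The helper name is ours; NEON
// support is assumed.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vld1q_f32_x2_usage() {
    let data: [f32; 8] = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0];
    let loaded = vld1q_f32_x2(data.as_ptr());
    let lo: [f32; 4] = transmute(loaded.0);
    let hi: [f32; 4] = transmute(loaded.1);
    assert_eq!(lo, [0.0, 1.0, 2.0, 3.0]);
    assert_eq!(hi, [4.0, 5.0, 6.0, 7.0]);
}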
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2))]
pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i8.p0i8")]
fn vld2_s8_(ptr: *const i8, size: i32) -> int8x8x2_t;
}
vld2_s8_(a as *const i8, 1)
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2.v8i8.p0v8i8")]
fn vld2_s8_(ptr: *const int8x8_t) -> int8x8x2_t;
}
vld2_s8_(a as _)
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2))]
pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i16.p0i8")]
fn vld2_s16_(ptr: *const i8, size: i32) -> int16x4x2_t;
}
vld2_s16_(a as *const i8, 2)
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2.v4i16.p0v4i16")]
fn vld2_s16_(ptr: *const int16x4_t) -> int16x4x2_t;
}
vld2_s16_(a as _)
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2))]
pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2i32.p0i8")]
fn vld2_s32_(ptr: *const i8, size: i32) -> int32x2x2_t;
}
vld2_s32_(a as *const i8, 4)
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2.v2i32.p0v2i32")]
fn vld2_s32_(ptr: *const int32x2_t) -> int32x2x2_t;
}
vld2_s32_(a as _)
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2))]
pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v16i8.p0i8")]
fn vld2q_s8_(ptr: *const i8, size: i32) -> int8x16x2_t;
}
vld2q_s8_(a as *const i8, 1)
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2.v16i8.p0v16i8")]
fn vld2q_s8_(ptr: *const int8x16_t) -> int8x16x2_t;
}
vld2q_s8_(a as _)
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2))]
pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v8i16.p0i8")]
fn vld2q_s16_(ptr: *const i8, size: i32) -> int16x8x2_t;
}
vld2q_s16_(a as *const i8, 2)
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2.v8i16.p0v8i16")]
fn vld2q_s16_(ptr: *const int16x8_t) -> int16x8x2_t;
}
vld2q_s16_(a as _)
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2))]
pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4i32.p0i8")]
fn vld2q_s32_(ptr: *const i8, size: i32) -> int32x4x2_t;
}
vld2q_s32_(a as *const i8, 4)
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2.v4i32.p0v4i32")]
fn vld2q_s32_(ptr: *const int32x4_t) -> int32x4x2_t;
}
vld2q_s32_(a as _)
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v1i64.p0i8")]
fn vld2_s64_(ptr: *const i8, size: i32) -> int64x1x2_t;
}
vld2_s64_(a as *const i8, 8)
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2.v1i64.p0v1i64")]
fn vld2_s64_(ptr: *const int64x1_t) -> int64x1x2_t;
}
vld2_s64_(a as _)
}
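// Illustrative sketch (not produced by stdarch-gen): unlike `vld1_*_x2`,
// which loads contiguously, `vld2` de-interleaves 2-element structures.
// Memory laid out as a0, b0, a1, b1, ... comes back as one register holding
// all the `a` values and one holding all the `b` values. The helper name is
// ours; NEON support is assumed.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn example_vld2_s16_usage() {
    // Four interleaved (a, b) pairs.
    let data: [i16; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
    let loaded = vld2_s16(data.as_ptr());
    let a: [i16; 4] = transmute(loaded.0);
    let b: [i16; 4] = transmute(loaded.1);
    // Even-indexed elements end up in one register, odd-indexed in the other.
    assert_eq!(a, [1, 3, 5, 7]);
    assert_eq!(b, [2, 4, 6, 8]);
}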
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t {
transmute(vld2_s8(transmute(a)))
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t {
transmute(vld2_s16(transmute(a)))
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t {
transmute(vld2_s32(transmute(a)))
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t {
transmute(vld2q_s8(transmute(a)))
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t {
transmute(vld2q_s16(transmute(a)))
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t {
transmute(vld2q_s32(transmute(a)))
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t {
transmute(vld2_s8(transmute(a)))
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t {
transmute(vld2_s16(transmute(a)))
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t {
transmute(vld2q_s8(transmute(a)))
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t {
transmute(vld2q_s16(transmute(a)))
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t {
transmute(vld2_s64(transmute(a)))
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t {
transmute(vld2_s64(transmute(a)))
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2))]
pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v2f32.p0i8")]
fn vld2_f32_(ptr: *const i8, size: i32) -> float32x2x2_t;
}
vld2_f32_(a as *const i8, 4)
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2.v2f32.p0v2f32")]
fn vld2_f32_(ptr: *const float32x2_t) -> float32x2x2_t;
}
vld2_f32_(a as _)
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2))]
pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2.v4f32.p0i8")]
fn vld2q_f32_(ptr: *const i8, size: i32) -> float32x4x2_t;
}
vld2q_f32_(a as *const i8, 4)
}
/// Load multiple 2-element structures to two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2.v4f32.p0v4f32")]
fn vld2q_f32_(ptr: *const float32x4_t) -> float32x4x2_t;
}
vld2q_f32_(a as _)
}
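// Illustrative usage sketch (hypothetical helper and data layout, not
// generated from neon.spec): a common use of vld2q_f32 is splitting
// interleaved complex samples into separate real and imaginary vectors.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn split_complex_sketch(interleaved: *const f32) -> (float32x4_t, float32x4_t) {
    // `interleaved` points at [re0, im0, re1, im1, re2, im2, re3, im3].
    let pairs = vld2q_f32(interleaved);
    (pairs.0, pairs.1) // (all four re values, all four im values)
}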
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2))]
pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i8.p0i8")]
fn vld2_dup_s8_(ptr: *const i8, size: i32) -> int8x8x2_t;
}
vld2_dup_s8_(a as *const i8, 1)
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2r.v8i8.p0i8")]
fn vld2_dup_s8_(ptr: *const i8) -> int8x8x2_t;
}
vld2_dup_s8_(a as _)
}
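// Illustrative usage sketch (hypothetical helper, not generated from
// neon.spec) of the _dup semantics: vld2_dup_s8 reads one 2-element
// structure (two bytes) and broadcasts it across both result vectors.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn vld2_dup_s8_usage_sketch() {
    let data: [i8; 2] = [7, -3];
    let r: int8x8x2_t = vld2_dup_s8(data.as_ptr());
    // Every lane of r.0 is 7 and every lane of r.1 is -3.
}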
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2))]
pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i16.p0i8")]
fn vld2_dup_s16_(ptr: *const i8, size: i32) -> int16x4x2_t;
}
vld2_dup_s16_(a as *const i8, 2)
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2r.v4i16.p0i16")]
fn vld2_dup_s16_(ptr: *const i16) -> int16x4x2_t;
}
vld2_dup_s16_(a as _)
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2))]
pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2i32.p0i8")]
fn vld2_dup_s32_(ptr: *const i8, size: i32) -> int32x2x2_t;
}
vld2_dup_s32_(a as *const i8, 4)
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2r.v2i32.p0i32")]
fn vld2_dup_s32_(ptr: *const i32) -> int32x2x2_t;
}
vld2_dup_s32_(a as _)
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2))]
pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v16i8.p0i8")]
fn vld2q_dup_s8_(ptr: *const i8, size: i32) -> int8x16x2_t;
}
vld2q_dup_s8_(a as *const i8, 1)
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2r.v16i8.p0i8")]
fn vld2q_dup_s8_(ptr: *const i8) -> int8x16x2_t;
}
vld2q_dup_s8_(a as _)
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2))]
pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v8i16.p0i8")]
fn vld2q_dup_s16_(ptr: *const i8, size: i32) -> int16x8x2_t;
}
vld2q_dup_s16_(a as *const i8, 2)
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2r.v8i16.p0i16")]
fn vld2q_dup_s16_(ptr: *const i16) -> int16x8x2_t;
}
vld2q_dup_s16_(a as _)
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2))]
pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4i32.p0i8")]
fn vld2q_dup_s32_(ptr: *const i8, size: i32) -> int32x4x2_t;
}
vld2q_dup_s32_(a as *const i8, 4)
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2r.v4i32.p0i32")]
fn vld2q_dup_s32_(ptr: *const i32) -> int32x4x2_t;
}
vld2q_dup_s32_(a as _)
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v1i64.p0i8")]
fn vld2_dup_s64_(ptr: *const i8, size: i32) -> int64x1x2_t;
}
vld2_dup_s64_(a as *const i8, 8)
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2r.v1i64.p0i64")]
fn vld2_dup_s64_(ptr: *const i64) -> int64x1x2_t;
}
vld2_dup_s64_(a as _)
}
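// Illustrative usage sketch (hypothetical helper, not generated from
// neon.spec): 64-bit vectors have a single lane, so "replicate to all
// lanes" degenerates to a plain two-register load here.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn vld2_dup_s64_usage_sketch() {
    let data: [i64; 2] = [1, 2];
    let r: int64x1x2_t = vld2_dup_s64(data.as_ptr());
    // Same result as vld2_s64(data.as_ptr()): r.0 == [1], r.1 == [2].
}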
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t {
transmute(vld2_dup_s8(transmute(a)))
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t {
transmute(vld2_dup_s16(transmute(a)))
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t {
transmute(vld2_dup_s32(transmute(a)))
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t {
transmute(vld2q_dup_s8(transmute(a)))
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t {
transmute(vld2q_dup_s16(transmute(a)))
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t {
transmute(vld2q_dup_s32(transmute(a)))
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t {
transmute(vld2_dup_s8(transmute(a)))
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t {
transmute(vld2_dup_s16(transmute(a)))
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t {
transmute(vld2q_dup_s8(transmute(a)))
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t {
transmute(vld2q_dup_s16(transmute(a)))
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t {
transmute(vld2_dup_s64(transmute(a)))
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t {
transmute(vld2_dup_s64(transmute(a)))
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2))]
pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v2f32.p0i8")]
fn vld2_dup_f32_(ptr: *const i8, size: i32) -> float32x2x2_t;
}
vld2_dup_f32_(a as *const i8, 4)
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2r.v2f32.p0f32")]
fn vld2_dup_f32_(ptr: *const f32) -> float32x2x2_t;
}
vld2_dup_f32_(a as _)
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2))]
pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2dup.v4f32.p0i8")]
fn vld2q_dup_f32_(ptr: *const i8, size: i32) -> float32x4x2_t;
}
vld2q_dup_f32_(a as *const i8, 4)
}
/// Load single 2-element structure and replicate to all lanes of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2r.v4f32.p0f32")]
fn vld2q_dup_f32_(ptr: *const f32) -> float32x4x2_t;
}
vld2q_dup_f32_(a as _)
}
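// Illustrative usage sketch (hypothetical helper, not generated from
// neon.spec): vld2q_dup_f32 broadcasts one (re, im) pair across full
// q registers, e.g. to scale many samples by a single complex constant.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn vld2q_dup_f32_usage_sketch() {
    let z: [f32; 2] = [1.5, -0.5]; // one (re, im) pair
    let r: float32x4x2_t = vld2q_dup_f32(z.as_ptr());
    // r.0 == [1.5, 1.5, 1.5, 1.5], r.1 == [-0.5, -0.5, -0.5, -0.5].
}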
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld2_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x2_t) -> int8x8x2_t {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0i8")]
fn vld2_lane_s8_(ptr: *const i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32) -> int8x8x2_t;
}
vld2_lane_s8_(a as _, b.0, b.1, LANE, 1)
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x2_t) -> int8x8x2_t {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0i8")]
fn vld2_lane_s8_(a: int8x8_t, b: int8x8_t, n: i64, ptr: *const i8) -> int8x8x2_t;
}
vld2_lane_s8_(b.0, b.1, LANE as i64, a as _)
}
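// Illustrative usage sketch (hypothetical helper, not generated from
// neon.spec) of the _lane semantics: vld2_lane_s8 reads one 2-element
// structure and inserts it into lane LANE of the two input vectors,
// leaving every other lane unchanged.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn vld2_lane_s8_usage_sketch(existing: int8x8x2_t) -> int8x8x2_t {
    let data: [i8; 2] = [100, -100];
    // Lane 3 of the result's .0 becomes 100 and lane 3 of .1 becomes -100;
    // all other lanes are copied from `existing`.
    vld2_lane_s8::<3>(data.as_ptr(), existing)
}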
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld2_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x2_t) -> int16x4x2_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0i8")]
fn vld2_lane_s16_(ptr: *const i8, a: int16x4_t, b: int16x4_t, n: i32, size: i32) -> int16x4x2_t;
}
vld2_lane_s16_(a as _, b.0, b.1, LANE, 2)
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x2_t) -> int16x4x2_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0i8")]
fn vld2_lane_s16_(a: int16x4_t, b: int16x4_t, n: i64, ptr: *const i8) -> int16x4x2_t;
}
vld2_lane_s16_(b.0, b.1, LANE as i64, a as _)
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld2_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x2_t) -> int32x2x2_t {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0i8")]
fn vld2_lane_s32_(ptr: *const i8, a: int32x2_t, b: int32x2_t, n: i32, size: i32) -> int32x2x2_t;
}
vld2_lane_s32_(a as _, b.0, b.1, LANE, 4)
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x2_t) -> int32x2x2_t {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0i8")]
fn vld2_lane_s32_(a: int32x2_t, b: int32x2_t, n: i64, ptr: *const i8) -> int32x2x2_t;
}
vld2_lane_s32_(b.0, b.1, LANE as i64, a as _)
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld2q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x2_t) -> int16x8x2_t {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0i8")]
fn vld2q_lane_s16_(ptr: *const i8, a: int16x8_t, b: int16x8_t, n: i32, size: i32) -> int16x8x2_t;
}
vld2q_lane_s16_(a as _, b.0, b.1, LANE, 2)
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x2_t) -> int16x8x2_t {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0i8")]
fn vld2q_lane_s16_(a: int16x8_t, b: int16x8_t, n: i64, ptr: *const i8) -> int16x8x2_t;
}
vld2q_lane_s16_(b.0, b.1, LANE as i64, a as _)
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld2q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x2_t) -> int32x4x2_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0i8")]
fn vld2q_lane_s32_(ptr: *const i8, a: int32x4_t, b: int32x4_t, n: i32, size: i32) -> int32x4x2_t;
}
vld2q_lane_s32_(a as _, b.0, b.1, LANE, 4)
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x2_t) -> int32x4x2_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0i8")]
fn vld2q_lane_s32_(a: int32x4_t, b: int32x4_t, n: i64, ptr: *const i8) -> int32x4x2_t;
}
vld2q_lane_s32_(b.0, b.1, LANE as i64, a as _)
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t {
static_assert_imm3!(LANE);
transmute(vld2_lane_s8::<LANE>(transmute(a), transmute(b)))
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x2_t) -> uint16x4x2_t {
static_assert_imm2!(LANE);
transmute(vld2_lane_s16::<LANE>(transmute(a), transmute(b)))
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x2_t) -> uint32x2x2_t {
static_assert_imm1!(LANE);
transmute(vld2_lane_s32::<LANE>(transmute(a), transmute(b)))
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x2_t) -> uint16x8x2_t {
static_assert_imm3!(LANE);
transmute(vld2q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x2_t) -> uint32x4x2_t {
static_assert_imm2!(LANE);
transmute(vld2q_lane_s32::<LANE>(transmute(a), transmute(b)))
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t {
static_assert_imm3!(LANE);
transmute(vld2_lane_s8::<LANE>(transmute(a), transmute(b)))
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x2_t) -> poly16x4x2_t {
static_assert_imm2!(LANE);
transmute(vld2_lane_s16::<LANE>(transmute(a), transmute(b)))
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x2_t) -> poly16x8x2_t {
static_assert_imm3!(LANE);
transmute(vld2q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld2_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x2_t) -> float32x2x2_t {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0i8")]
fn vld2_lane_f32_(ptr: *const i8, a: float32x2_t, b: float32x2_t, n: i32, size: i32) -> float32x2x2_t;
}
vld2_lane_f32_(a as _, b.0, b.1, LANE, 4)
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x2_t) -> float32x2x2_t {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0i8")]
fn vld2_lane_f32_(a: float32x2_t, b: float32x2_t, n: i64, ptr: *const i8) -> float32x2x2_t;
}
vld2_lane_f32_(b.0, b.1, LANE as i64, a as _)
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld2q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x2_t) -> float32x4x2_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0i8")]
fn vld2q_lane_f32_(ptr: *const i8, a: float32x4_t, b: float32x4_t, n: i32, size: i32) -> float32x4x2_t;
}
vld2q_lane_f32_(a as _, b.0, b.1, LANE, 4)
}
/// Load single 2-element structure to one lane of two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x2_t) -> float32x4x2_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0i8")]
fn vld2q_lane_f32_(a: float32x4_t, b: float32x4_t, n: i64, ptr: *const i8) -> float32x4x2_t;
}
vld2q_lane_f32_(b.0, b.1, LANE as i64, a as _)
}
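// Illustrative usage sketch (hypothetical helper, not generated from
// neon.spec): the lane index is a const generic, so an out-of-range value
// (here anything outside 0..4) is rejected at compile time by
// static_assert_imm2!.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn vld2q_lane_f32_usage_sketch(acc: float32x4x2_t, src: *const f32) -> float32x4x2_t {
    // Overwrites lane 1 of both vectors with src[0] and src[1].
    vld2q_lane_f32::<1>(src, acc)
}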
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i8.p0i8")]
fn vld3_s8_(ptr: *const i8, size: i32) -> int8x8x3_t;
}
vld3_s8_(a as *const i8, 1)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3.v8i8.p0v8i8")]
fn vld3_s8_(ptr: *const int8x8_t) -> int8x8x3_t;
}
vld3_s8_(a as _)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i16.p0i8")]
fn vld3_s16_(ptr: *const i8, size: i32) -> int16x4x3_t;
}
vld3_s16_(a as *const i8, 2)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3.v4i16.p0v4i16")]
fn vld3_s16_(ptr: *const int16x4_t) -> int16x4x3_t;
}
vld3_s16_(a as _)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2i32.p0i8")]
fn vld3_s32_(ptr: *const i8, size: i32) -> int32x2x3_t;
}
vld3_s32_(a as *const i8, 4)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3.v2i32.p0v2i32")]
fn vld3_s32_(ptr: *const int32x2_t) -> int32x2x3_t;
}
vld3_s32_(a as _)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v16i8.p0i8")]
fn vld3q_s8_(ptr: *const i8, size: i32) -> int8x16x3_t;
}
vld3q_s8_(a as *const i8, 1)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3.v16i8.p0v16i8")]
fn vld3q_s8_(ptr: *const int8x16_t) -> int8x16x3_t;
}
vld3q_s8_(a as _)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v8i16.p0i8")]
fn vld3q_s16_(ptr: *const i8, size: i32) -> int16x8x3_t;
}
vld3q_s16_(a as *const i8, 2)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3.v8i16.p0v8i16")]
fn vld3q_s16_(ptr: *const int16x8_t) -> int16x8x3_t;
}
vld3q_s16_(a as _)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4i32.p0i8")]
fn vld3q_s32_(ptr: *const i8, size: i32) -> int32x4x3_t;
}
vld3q_s32_(a as *const i8, 4)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3.v4i32.p0v4i32")]
fn vld3q_s32_(ptr: *const int32x4_t) -> int32x4x3_t;
}
vld3q_s32_(a as _)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v1i64.p0i8")]
fn vld3_s64_(ptr: *const i8, size: i32) -> int64x1x3_t;
}
vld3_s64_(a as *const i8, 8)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3.v1i64.p0v1i64")]
fn vld3_s64_(ptr: *const int64x1_t) -> int64x1x3_t;
}
vld3_s64_(a as _)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t {
transmute(vld3_s8(transmute(a)))
}
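// Illustrative usage sketch (hypothetical helper and data layout, not
// generated from neon.spec): the classic use of vld3_u8 is deinterleaving
// packed RGB pixels into separate channel planes.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
unsafe fn split_rgb_sketch(rgb: *const u8) -> uint8x8x3_t {
    // `rgb` points at [r0, g0, b0, r1, g1, b1, ...] for 8 pixels (24 bytes).
    // The result holds the reds in .0, greens in .1 and blues in .2.
    vld3_u8(rgb)
}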
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t {
transmute(vld3_s16(transmute(a)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t {
transmute(vld3_s32(transmute(a)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t {
transmute(vld3q_s8(transmute(a)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t {
transmute(vld3q_s16(transmute(a)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t {
transmute(vld3q_s32(transmute(a)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t {
transmute(vld3_s8(transmute(a)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t {
transmute(vld3_s16(transmute(a)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t {
transmute(vld3q_s8(transmute(a)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t {
transmute(vld3q_s16(transmute(a)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t {
transmute(vld3_s64(transmute(a)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t {
transmute(vld3_s64(transmute(a)))
}
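// Editor's note (illustrative sketch, not generated from neon.spec): the
// unsigned and polynomial variants above are plain bit reinterpretations of
// the signed loads, and the 64-bit variants need no interleaving at all
// (each output register has a single lane, hence assert_instr(nop)). The
// sketch below checks that vld3_u64 simply yields the three elements in
// order; module, test name, and data are assumptions for demonstration only.
#[cfg(all(test, target_arch = "aarch64"))]
mod vld3_u64_wrapper_sketch {
use super::*;
#[test]
fn loads_three_u64_elements() {
unsafe {
let data: [u64; 3] = [10, 20, 30];
let v = vld3_u64(data.as_ptr());
// Each single-lane output register holds one member of the structure.
assert_eq!(core::mem::transmute::<uint64x1_t, u64>(v.0), 10);
assert_eq!(core::mem::transmute::<uint64x1_t, u64>(v.1), 20);
assert_eq!(core::mem::transmute::<uint64x1_t, u64>(v.2), 30);
}
}
}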
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v2f32.p0i8")]
fn vld3_f32_(ptr: *const i8, size: i32) -> float32x2x3_t;
}
vld3_f32_(a as *const i8, 4)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3.v2f32.p0v2f32")]
fn vld3_f32_(ptr: *const float32x2_t) -> float32x2x3_t;
}
vld3_f32_(a as _)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3.v4f32.p0i8")]
fn vld3q_f32_(ptr: *const i8, size: i32) -> float32x4x3_t;
}
vld3q_f32_(a as *const i8, 4)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3.v4f32.p0v4f32")]
fn vld3q_f32_(ptr: *const float32x4_t) -> float32x4x3_t;
}
vld3q_f32_(a as _)
}
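// Editor's note (illustrative sketch, not generated from neon.spec): vld3
// de-interleaves its input, so loading [x0, y0, z0, x1, y1, z1] with
// vld3_f32 gathers the x values into the first register, the y values into
// the second, and the z values into the third. Module, test name, and data
// below are assumptions for demonstration only.
#[cfg(all(test, target_arch = "aarch64"))]
mod vld3_f32_deinterleave_sketch {
use super::*;
#[test]
fn gathers_matching_members() {
unsafe {
let xyz: [f32; 6] = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
let v = vld3_f32(xyz.as_ptr());
// Member 0 of both structures lands in v.0, member 1 in v.1, member 2 in v.2.
assert_eq!(core::mem::transmute::<float32x2_t, [f32; 2]>(v.0), [1.0, 4.0]);
assert_eq!(core::mem::transmute::<float32x2_t, [f32; 2]>(v.1), [2.0, 5.0]);
assert_eq!(core::mem::transmute::<float32x2_t, [f32; 2]>(v.2), [3.0, 6.0]);
}
}
}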
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i8.p0i8")]
fn vld3_dup_s8_(ptr: *const i8, size: i32) -> int8x8x3_t;
}
vld3_dup_s8_(a as *const i8, 1)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3r.v8i8.p0i8")]
fn vld3_dup_s8_(ptr: *const i8) -> int8x8x3_t;
}
vld3_dup_s8_(a as _)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i16.p0i8")]
fn vld3_dup_s16_(ptr: *const i8, size: i32) -> int16x4x3_t;
}
vld3_dup_s16_(a as *const i8, 2)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3r.v4i16.p0i16")]
fn vld3_dup_s16_(ptr: *const i16) -> int16x4x3_t;
}
vld3_dup_s16_(a as _)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2i32.p0i8")]
fn vld3_dup_s32_(ptr: *const i8, size: i32) -> int32x2x3_t;
}
vld3_dup_s32_(a as *const i8, 4)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3r.v2i32.p0i32")]
fn vld3_dup_s32_(ptr: *const i32) -> int32x2x3_t;
}
vld3_dup_s32_(a as _)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v16i8.p0i8")]
fn vld3q_dup_s8_(ptr: *const i8, size: i32) -> int8x16x3_t;
}
vld3q_dup_s8_(a as *const i8, 1)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3r.v16i8.p0i8")]
fn vld3q_dup_s8_(ptr: *const i8) -> int8x16x3_t;
}
vld3q_dup_s8_(a as _)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v8i16.p0i8")]
fn vld3q_dup_s16_(ptr: *const i8, size: i32) -> int16x8x3_t;
}
vld3q_dup_s16_(a as *const i8, 2)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3r.v8i16.p0i16")]
fn vld3q_dup_s16_(ptr: *const i16) -> int16x8x3_t;
}
vld3q_dup_s16_(a as _)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4i32.p0i8")]
fn vld3q_dup_s32_(ptr: *const i8, size: i32) -> int32x4x3_t;
}
vld3q_dup_s32_(a as *const i8, 4)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3r.v4i32.p0i32")]
fn vld3q_dup_s32_(ptr: *const i32) -> int32x4x3_t;
}
vld3q_dup_s32_(a as _)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v1i64.p0i8")]
fn vld3_dup_s64_(ptr: *const i8, size: i32) -> int64x1x3_t;
}
vld3_dup_s64_(a as *const i8, 8)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3r.v1i64.p0i64")]
fn vld3_dup_s64_(ptr: *const i64) -> int64x1x3_t;
}
vld3_dup_s64_(a as _)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t {
transmute(vld3_dup_s8(transmute(a)))
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t {
transmute(vld3_dup_s16(transmute(a)))
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t {
transmute(vld3_dup_s32(transmute(a)))
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t {
transmute(vld3q_dup_s8(transmute(a)))
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t {
transmute(vld3q_dup_s16(transmute(a)))
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t {
transmute(vld3q_dup_s32(transmute(a)))
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t {
transmute(vld3_dup_s8(transmute(a)))
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t {
transmute(vld3_dup_s16(transmute(a)))
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t {
transmute(vld3q_dup_s8(transmute(a)))
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t {
transmute(vld3q_dup_s16(transmute(a)))
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t {
transmute(vld3_dup_s64(transmute(a)))
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t {
transmute(vld3_dup_s64(transmute(a)))
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v2f32.p0i8")]
fn vld3_dup_f32_(ptr: *const i8, size: i32) -> float32x2x3_t;
}
vld3_dup_f32_(a as *const i8, 4)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3r.v2f32.p0f32")]
fn vld3_dup_f32_(ptr: *const f32) -> float32x2x3_t;
}
vld3_dup_f32_(a as _)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3))]
pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3dup.v4f32.p0i8")]
fn vld3q_dup_f32_(ptr: *const i8, size: i32) -> float32x4x3_t;
}
vld3q_dup_f32_(a as *const i8, 4)
}
/// Load single 3-element structure and replicate to all lanes of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3r.v4f32.p0f32")]
fn vld3q_dup_f32_(ptr: *const f32) -> float32x4x3_t;
}
vld3q_dup_f32_(a as _)
}
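// Editor's note (illustrative sketch, not generated from neon.spec): the
// vld3*_dup family reads exactly one 3-element structure and broadcasts each
// member to every lane of its output register (ld3r on AArch64). Module,
// test name, and data below are assumptions for demonstration only.
#[cfg(all(test, target_arch = "aarch64"))]
mod vld3_dup_f32_sketch {
use super::*;
#[test]
fn replicates_one_structure_to_all_lanes() {
unsafe {
let rgb: [f32; 3] = [0.25, 0.5, 0.75];
let v = vld3_dup_f32(rgb.as_ptr());
// Every lane of each output repeats the corresponding structure member.
assert_eq!(core::mem::transmute::<float32x2_t, [f32; 2]>(v.0), [0.25, 0.25]);
assert_eq!(core::mem::transmute::<float32x2_t, [f32; 2]>(v.1), [0.5, 0.5]);
assert_eq!(core::mem::transmute::<float32x2_t, [f32; 2]>(v.2), [0.75, 0.75]);
}
}
}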
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x3_t) -> int8x8x3_t {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0i8")]
fn vld3_lane_s8_(ptr: *const i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i32, size: i32) -> int8x8x3_t;
}
vld3_lane_s8_(a as _, b.0, b.1, b.2, LANE, 1)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x3_t) -> int8x8x3_t {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0i8")]
fn vld3_lane_s8_(a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i64, ptr: *const i8) -> int8x8x3_t;
}
vld3_lane_s8_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x3_t) -> int16x4x3_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0i8")]
fn vld3_lane_s16_(ptr: *const i8, a: int16x4_t, b: int16x4_t, c: int16x4_t, n: i32, size: i32) -> int16x4x3_t;
}
vld3_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x3_t) -> int16x4x3_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0i8")]
fn vld3_lane_s16_(a: int16x4_t, b: int16x4_t, c: int16x4_t, n: i64, ptr: *const i8) -> int16x4x3_t;
}
vld3_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x3_t) -> int32x2x3_t {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0i8")]
fn vld3_lane_s32_(ptr: *const i8, a: int32x2_t, b: int32x2_t, c: int32x2_t, n: i32, size: i32) -> int32x2x3_t;
}
vld3_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x3_t) -> int32x2x3_t {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0i8")]
fn vld3_lane_s32_(a: int32x2_t, b: int32x2_t, c: int32x2_t, n: i64, ptr: *const i8) -> int32x2x3_t;
}
vld3_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x3_t) -> int16x8x3_t {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0i8")]
fn vld3q_lane_s16_(ptr: *const i8, a: int16x8_t, b: int16x8_t, c: int16x8_t, n: i32, size: i32) -> int16x8x3_t;
}
vld3q_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x3_t) -> int16x8x3_t {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0i8")]
fn vld3q_lane_s16_(a: int16x8_t, b: int16x8_t, c: int16x8_t, n: i64, ptr: *const i8) -> int16x8x3_t;
}
vld3q_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x3_t) -> int32x4x3_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0i8")]
fn vld3q_lane_s32_(ptr: *const i8, a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i32, size: i32) -> int32x4x3_t;
}
vld3q_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x3_t) -> int32x4x3_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0i8")]
fn vld3q_lane_s32_(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64, ptr: *const i8) -> int32x4x3_t;
}
vld3q_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t {
static_assert_imm3!(LANE);
transmute(vld3_lane_s8::<LANE>(transmute(a), transmute(b)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t {
static_assert_imm2!(LANE);
transmute(vld3_lane_s16::<LANE>(transmute(a), transmute(b)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t {
static_assert_imm1!(LANE);
transmute(vld3_lane_s32::<LANE>(transmute(a), transmute(b)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t {
static_assert_imm3!(LANE);
transmute(vld3q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t {
static_assert_imm2!(LANE);
transmute(vld3q_lane_s32::<LANE>(transmute(a), transmute(b)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t {
static_assert_imm3!(LANE);
transmute(vld3_lane_s8::<LANE>(transmute(a), transmute(b)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t {
static_assert_imm2!(LANE);
transmute(vld3_lane_s16::<LANE>(transmute(a), transmute(b)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t {
static_assert_imm3!(LANE);
transmute(vld3q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x3_t) -> float32x2x3_t {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0i8")]
fn vld3_lane_f32_(ptr: *const i8, a: float32x2_t, b: float32x2_t, c: float32x2_t, n: i32, size: i32) -> float32x2x3_t;
}
vld3_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x3_t) -> float32x2x3_t {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0i8")]
fn vld3_lane_f32_(a: float32x2_t, b: float32x2_t, c: float32x2_t, n: i64, ptr: *const i8) -> float32x2x3_t;
}
vld3_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x3_t) -> float32x4x3_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0i8")]
fn vld3q_lane_f32_(ptr: *const i8, a: float32x4_t, b: float32x4_t, c: float32x4_t, n: i32, size: i32) -> float32x4x3_t;
}
vld3q_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
}
/// Load multiple 3-element structures to three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x3_t) -> float32x4x3_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0i8")]
fn vld3q_lane_f32_(a: float32x4_t, b: float32x4_t, c: float32x4_t, n: i64, ptr: *const i8) -> float32x4x3_t;
}
vld3q_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _)
}
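// Editor's note (illustrative sketch, not generated from neon.spec): the
// vld3*_lane family loads one 3-element structure into lane LANE of the
// three registers passed in `b` and leaves every other lane unchanged.
// LANE = 1 and the zeroed inputs below are assumptions for demonstration.
#[cfg(all(test, target_arch = "aarch64"))]
mod vld3_lane_f32_sketch {
use super::*;
#[test]
fn overwrites_only_the_selected_lane() {
unsafe {
let src: [f32; 3] = [7.0, 8.0, 9.0];
let zero = vdup_n_f32(0.0);
let v = vld3_lane_f32::<1>(src.as_ptr(), float32x2x3_t(zero, zero, zero));
// Lane 0 keeps its previous value; lane 1 now holds the loaded structure.
assert_eq!(core::mem::transmute::<float32x2_t, [f32; 2]>(v.0), [0.0, 7.0]);
assert_eq!(core::mem::transmute::<float32x2_t, [f32; 2]>(v.1), [0.0, 8.0]);
assert_eq!(core::mem::transmute::<float32x2_t, [f32; 2]>(v.2), [0.0, 9.0]);
}
}
}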
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i8.p0i8")]
fn vld4_s8_(ptr: *const i8, size: i32) -> int8x8x4_t;
}
vld4_s8_(a as *const i8, 1)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4.v8i8.p0v8i8")]
fn vld4_s8_(ptr: *const int8x8_t) -> int8x8x4_t;
}
vld4_s8_(a as _)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i16.p0i8")]
fn vld4_s16_(ptr: *const i8, size: i32) -> int16x4x4_t;
}
vld4_s16_(a as *const i8, 2)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4.v4i16.p0v4i16")]
fn vld4_s16_(ptr: *const int16x4_t) -> int16x4x4_t;
}
vld4_s16_(a as _)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2i32.p0i8")]
fn vld4_s32_(ptr: *const i8, size: i32) -> int32x2x4_t;
}
vld4_s32_(a as *const i8, 4)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4.v2i32.p0v2i32")]
fn vld4_s32_(ptr: *const int32x2_t) -> int32x2x4_t;
}
vld4_s32_(a as _)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v16i8.p0i8")]
fn vld4q_s8_(ptr: *const i8, size: i32) -> int8x16x4_t;
}
vld4q_s8_(a as *const i8, 1)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4.v16i8.p0v16i8")]
fn vld4q_s8_(ptr: *const int8x16_t) -> int8x16x4_t;
}
vld4q_s8_(a as _)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v8i16.p0i8")]
fn vld4q_s16_(ptr: *const i8, size: i32) -> int16x8x4_t;
}
vld4q_s16_(a as *const i8, 2)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4.v8i16.p0v8i16")]
fn vld4q_s16_(ptr: *const int16x8_t) -> int16x8x4_t;
}
vld4q_s16_(a as _)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4i32.p0i8")]
fn vld4q_s32_(ptr: *const i8, size: i32) -> int32x4x4_t;
}
vld4q_s32_(a as *const i8, 4)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4.v4i32.p0v4i32")]
fn vld4q_s32_(ptr: *const int32x4_t) -> int32x4x4_t;
}
vld4q_s32_(a as _)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v1i64.p0i8")]
fn vld4_s64_(ptr: *const i8, size: i32) -> int64x1x4_t;
}
vld4_s64_(a as *const i8, 8)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4.v1i64.p0v1i64")]
fn vld4_s64_(ptr: *const int64x1_t) -> int64x1x4_t;
}
vld4_s64_(a as _)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t {
transmute(vld4_s8(transmute(a)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t {
transmute(vld4_s16(transmute(a)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t {
transmute(vld4_s32(transmute(a)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t {
transmute(vld4q_s8(transmute(a)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t {
transmute(vld4q_s16(transmute(a)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t {
transmute(vld4q_s32(transmute(a)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t {
transmute(vld4_s8(transmute(a)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t {
transmute(vld4_s16(transmute(a)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t {
transmute(vld4q_s8(transmute(a)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t {
transmute(vld4q_s16(transmute(a)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t {
transmute(vld4_s64(transmute(a)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t {
transmute(vld4_s64(transmute(a)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v2f32.p0i8")]
fn vld4_f32_(ptr: *const i8, size: i32) -> float32x2x4_t;
}
vld4_f32_(a as *const i8, 4)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4.v2f32.p0v2f32")]
fn vld4_f32_(ptr: *const float32x2_t) -> float32x2x4_t;
}
vld4_f32_(a as _)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4.v4f32.p0i8")]
fn vld4q_f32_(ptr: *const i8, size: i32) -> float32x4x4_t;
}
vld4q_f32_(a as *const i8, 4)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4.v4f32.p0v4f32")]
fn vld4q_f32_(ptr: *const float32x4_t) -> float32x4x4_t;
}
vld4q_f32_(a as _)
}
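// Editor's note (illustrative sketch, not generated from neon.spec): vld4
// extends the vld3 pattern to 4-element structures, producing one output
// register per structure member. Module, test name, and data below are
// assumptions for demonstration only.
#[cfg(all(test, target_arch = "aarch64"))]
mod vld4_f32_deinterleave_sketch {
use super::*;
#[test]
fn deinterleaves_four_members() {
unsafe {
let rgba: [f32; 8] = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
let v = vld4_f32(rgba.as_ptr());
// Two interleaved structures in memory become four member-wise registers.
assert_eq!(core::mem::transmute::<float32x2_t, [f32; 2]>(v.0), [1.0, 5.0]);
assert_eq!(core::mem::transmute::<float32x2_t, [f32; 2]>(v.1), [2.0, 6.0]);
assert_eq!(core::mem::transmute::<float32x2_t, [f32; 2]>(v.2), [3.0, 7.0]);
assert_eq!(core::mem::transmute::<float32x2_t, [f32; 2]>(v.3), [4.0, 8.0]);
}
}
}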
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i8.p0i8")]
fn vld4_dup_s8_(ptr: *const i8, size: i32) -> int8x8x4_t;
}
vld4_dup_s8_(a as *const i8, 1)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4r.v8i8.p0i8")]
fn vld4_dup_s8_(ptr: *const i8) -> int8x8x4_t;
}
vld4_dup_s8_(a as _)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i16.p0i8")]
fn vld4_dup_s16_(ptr: *const i8, size: i32) -> int16x4x4_t;
}
vld4_dup_s16_(a as *const i8, 2)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4r.v4i16.p0i16")]
fn vld4_dup_s16_(ptr: *const i16) -> int16x4x4_t;
}
vld4_dup_s16_(a as _)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2i32.p0i8")]
fn vld4_dup_s32_(ptr: *const i8, size: i32) -> int32x2x4_t;
}
vld4_dup_s32_(a as *const i8, 4)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4r.v2i32.p0i32")]
fn vld4_dup_s32_(ptr: *const i32) -> int32x2x4_t;
}
vld4_dup_s32_(a as _)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v16i8.p0i8")]
fn vld4q_dup_s8_(ptr: *const i8, size: i32) -> int8x16x4_t;
}
vld4q_dup_s8_(a as *const i8, 1)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4r.v16i8.p0i8")]
fn vld4q_dup_s8_(ptr: *const i8) -> int8x16x4_t;
}
vld4q_dup_s8_(a as _)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v8i16.p0i8")]
fn vld4q_dup_s16_(ptr: *const i8, size: i32) -> int16x8x4_t;
}
vld4q_dup_s16_(a as *const i8, 2)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4r.v8i16.p0i16")]
fn vld4q_dup_s16_(ptr: *const i16) -> int16x8x4_t;
}
vld4q_dup_s16_(a as _)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4i32.p0i8")]
fn vld4q_dup_s32_(ptr: *const i8, size: i32) -> int32x4x4_t;
}
vld4q_dup_s32_(a as *const i8, 4)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4r.v4i32.p0i32")]
fn vld4q_dup_s32_(ptr: *const i32) -> int32x4x4_t;
}
vld4q_dup_s32_(a as _)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v1i64.p0i8")]
fn vld4_dup_s64_(ptr: *const i8, size: i32) -> int64x1x4_t;
}
vld4_dup_s64_(a as *const i8, 8)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4r.v1i64.p0i64")]
fn vld4_dup_s64_(ptr: *const i64) -> int64x1x4_t;
}
vld4_dup_s64_(a as _)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t {
transmute(vld4_dup_s8(transmute(a)))
}
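// The unsigned and polynomial `vld4_dup` variants are thin wrappers: the load
// itself is type-oblivious, so they reinterpret the pointer, call the signed
// intrinsic, and reinterpret the result. A minimal sketch of that replicating
// behavior (illustrative only, not generated from neon.spec; assumes
// `stdarch_test::simd_test`):
#[cfg(test)]
mod vld4_dup_u8_usage_sketch {
use super::*;
use stdarch_test::simd_test;
#[simd_test(enable = "neon")]
unsafe fn replicates_each_field_across_all_lanes() {
let bytes: [u8; 4] = [0x11, 0x22, 0x33, 0x44];
let u: uint8x8x4_t = vld4_dup_u8(bytes.as_ptr());
let s: int8x8x4_t = vld4_dup_s8(bytes.as_ptr() as *const i8);
// Field 0 fills every lane of register 0, with the same bit pattern as the
// signed intrinsic this wrapper delegates to.
assert_eq!(core::mem::transmute::<_, [u8; 8]>(u.0), [0x11; 8]);
assert_eq!(core::mem::transmute::<_, [i8; 8]>(s.0), [0x11; 8]);
}
}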
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t {
transmute(vld4_dup_s16(transmute(a)))
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t {
transmute(vld4_dup_s32(transmute(a)))
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t {
transmute(vld4q_dup_s8(transmute(a)))
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t {
transmute(vld4q_dup_s16(transmute(a)))
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t {
transmute(vld4q_dup_s32(transmute(a)))
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t {
transmute(vld4_dup_s8(transmute(a)))
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t {
transmute(vld4_dup_s16(transmute(a)))
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t {
transmute(vld4q_dup_s8(transmute(a)))
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t {
transmute(vld4q_dup_s16(transmute(a)))
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t {
transmute(vld4_dup_s64(transmute(a)))
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t {
transmute(vld4_dup_s64(transmute(a)))
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v2f32.p0i8")]
fn vld4_dup_f32_(ptr: *const i8, size: i32) -> float32x2x4_t;
}
vld4_dup_f32_(a as *const i8, 4)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4r.v2f32.p0f32")]
fn vld4_dup_f32_(ptr: *const f32) -> float32x2x4_t;
}
vld4_dup_f32_(a as _)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4))]
pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4dup.v4f32.p0i8")]
fn vld4q_dup_f32_(ptr: *const i8, size: i32) -> float32x4x4_t;
}
vld4q_dup_f32_(a as *const i8, 4)
}
/// Load single 4-element structure and replicate to all lanes of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4r.v4f32.p0f32")]
fn vld4q_dup_f32_(ptr: *const f32) -> float32x4x4_t;
}
vld4q_dup_f32_(a as _)
}
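// A minimal sketch of the float `vld4_dup` forms above (illustrative only,
// not generated from neon.spec; assumes `stdarch_test::simd_test`): one
// 4-element structure is loaded and field k is broadcast to every lane of
// register k.
#[cfg(test)]
mod vld4_dup_f32_usage_sketch {
use super::*;
use stdarch_test::simd_test;
#[simd_test(enable = "neon")]
unsafe fn broadcasts_one_structure() {
let quad: [f32; 4] = [1.0, 2.0, 3.0, 4.0];
let r: float32x2x4_t = vld4_dup_f32(quad.as_ptr());
assert_eq!(core::mem::transmute::<_, [f32; 2]>(r.2), [3.0, 3.0]);
}
}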
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x4_t) -> int8x8x4_t {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0i8")]
fn vld4_lane_s8_(ptr: *const i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, n: i32, size: i32) -> int8x8x4_t;
}
vld4_lane_s8_(a as _, b.0, b.1, b.2, b.3, LANE, 1)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x4_t) -> int8x8x4_t {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0i8")]
fn vld4_lane_s8_(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, n: i64, ptr: *const i8) -> int8x8x4_t;
}
vld4_lane_s8_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x4_t) -> int16x4x4_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0i8")]
fn vld4_lane_s16_(ptr: *const i8, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, n: i32, size: i32) -> int16x4x4_t;
}
vld4_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x4_t) -> int16x4x4_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0i8")]
fn vld4_lane_s16_(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, n: i64, ptr: *const i8) -> int16x4x4_t;
}
vld4_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x4_t) -> int32x2x4_t {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0i8")]
fn vld4_lane_s32_(ptr: *const i8, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, n: i32, size: i32) -> int32x2x4_t;
}
vld4_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x4_t) -> int32x2x4_t {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0i8")]
fn vld4_lane_s32_(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, n: i64, ptr: *const i8) -> int32x2x4_t;
}
vld4_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x4_t) -> int16x8x4_t {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0i8")]
fn vld4q_lane_s16_(ptr: *const i8, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, n: i32, size: i32) -> int16x8x4_t;
}
vld4q_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x4_t) -> int16x8x4_t {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0i8")]
fn vld4q_lane_s16_(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, n: i64, ptr: *const i8) -> int16x8x4_t;
}
vld4q_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x4_t) -> int32x4x4_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0i8")]
fn vld4q_lane_s32_(ptr: *const i8, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, n: i32, size: i32) -> int32x4x4_t;
}
vld4q_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x4_t) -> int32x4x4_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0i8")]
fn vld4q_lane_s32_(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, n: i64, ptr: *const i8) -> int32x4x4_t;
}
vld4q_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t {
static_assert_imm3!(LANE);
transmute(vld4_lane_s8::<LANE>(transmute(a), transmute(b)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t {
static_assert_imm2!(LANE);
transmute(vld4_lane_s16::<LANE>(transmute(a), transmute(b)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t {
static_assert_imm1!(LANE);
transmute(vld4_lane_s32::<LANE>(transmute(a), transmute(b)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t {
static_assert_imm3!(LANE);
transmute(vld4q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t {
static_assert_imm2!(LANE);
transmute(vld4q_lane_s32::<LANE>(transmute(a), transmute(b)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t {
static_assert_imm3!(LANE);
transmute(vld4_lane_s8::<LANE>(transmute(a), transmute(b)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t {
static_assert_imm2!(LANE);
transmute(vld4_lane_s16::<LANE>(transmute(a), transmute(b)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t {
static_assert_imm3!(LANE);
transmute(vld4q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x4_t) -> float32x2x4_t {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0i8")]
fn vld4_lane_f32_(ptr: *const i8, a: float32x2_t, b: float32x2_t, c: float32x2_t, d: float32x2_t, n: i32, size: i32) -> float32x2x4_t;
}
vld4_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x4_t) -> float32x2x4_t {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0i8")]
fn vld4_lane_f32_(a: float32x2_t, b: float32x2_t, c: float32x2_t, d: float32x2_t, n: i64, ptr: *const i8) -> float32x2x4_t;
}
vld4_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x4_t) -> float32x4x4_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0i8")]
fn vld4q_lane_f32_(ptr: *const i8, a: float32x4_t, b: float32x4_t, c: float32x4_t, d: float32x4_t, n: i32, size: i32) -> float32x4x4_t;
}
vld4q_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
}
/// Load multiple 4-element structures to four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x4_t) -> float32x4x4_t {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0i8")]
fn vld4q_lane_f32_(a: float32x4_t, b: float32x4_t, c: float32x4_t, d: float32x4_t, n: i64, ptr: *const i8) -> float32x4x4_t;
}
vld4q_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
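// A minimal sketch of the `vld4_lane` forms above (illustrative only, not
// generated from neon.spec; assumes `stdarch_test::simd_test`): one 4-element
// structure is loaded and field k replaces lane LANE of register k, while all
// other lanes keep the values passed in `b`.
#[cfg(test)]
mod vld4_lane_f32_usage_sketch {
use super::*;
use stdarch_test::simd_test;
#[simd_test(enable = "neon")]
unsafe fn fills_only_the_selected_lane() {
let zero = vdup_n_f32(0.0);
let b = float32x2x4_t(zero, zero, zero, zero);
let quad: [f32; 4] = [1.0, 2.0, 3.0, 4.0];
let r = vld4_lane_f32::<1>(quad.as_ptr(), b);
// Lane 0 is preserved from `b`; lane 1 now holds the loaded fields.
assert_eq!(core::mem::transmute::<_, [f32; 2]>(r.0), [0.0, 1.0]);
assert_eq!(core::mem::transmute::<_, [f32; 2]>(r.3), [0.0, 4.0]);
}
}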
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1_t) {
static_assert!(LANE : i32 where LANE == 0);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16_t) {
static_assert_imm4!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1_t) {
static_assert!(LANE : i32 where LANE == 0);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16_t) {
static_assert_imm4!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16_t) {
static_assert_imm4!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1_t) {
static_assert!(LANE : i32 where LANE == 0);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
}
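// A minimal sketch of the `vst1_lane` stores above (illustrative only, not
// generated from neon.spec; assumes `stdarch_test::simd_test`): exactly one
// lane, selected by the const generic, is written through the pointer.
#[cfg(test)]
mod vst1q_lane_f32_usage_sketch {
use super::*;
use stdarch_test::simd_test;
#[simd_test(enable = "neon")]
unsafe fn stores_the_selected_lane() {
let src: [f32; 4] = [10.0, 20.0, 30.0, 40.0];
let v: float32x4_t = vld1q_f32(src.as_ptr());
let mut out: f32 = 0.0;
vst1q_lane_f32::<2>(&mut out, v);
assert_eq!(out, 30.0);
}
}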
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i8.v8i8")]
fn vst1_s8_x2_(ptr: *mut i8, a: int8x8_t, b: int8x8_t);
}
vst1_s8_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x2.v8i8.p0i8")]
fn vst1_s8_x2_(a: int8x8_t, b: int8x8_t, ptr: *mut i8);
}
vst1_s8_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i16.v4i16")]
fn vst1_s16_x2_(ptr: *mut i16, a: int16x4_t, b: int16x4_t);
}
vst1_s16_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x2.v4i16.p0i16")]
fn vst1_s16_x2_(a: int16x4_t, b: int16x4_t, ptr: *mut i16);
}
vst1_s16_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i32.v2i32")]
fn vst1_s32_x2_(ptr: *mut i32, a: int32x2_t, b: int32x2_t);
}
vst1_s32_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x2.v2i32.p0i32")]
fn vst1_s32_x2_(a: int32x2_t, b: int32x2_t, ptr: *mut i32);
}
vst1_s32_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i64.v1i64")]
fn vst1_s64_x2_(ptr: *mut i64, a: int64x1_t, b: int64x1_t);
}
vst1_s64_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x2.v1i64.p0i64")]
fn vst1_s64_x2_(a: int64x1_t, b: int64x1_t, ptr: *mut i64);
}
vst1_s64_x2_(b.0, b.1, a)
}
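// A minimal round-trip sketch for the `vst1_x2` stores above (illustrative
// only, not generated from neon.spec; assumes `stdarch_test::simd_test` and
// the matching `vld1_s32_x2` load from this file): the two registers are
// written back-to-back with no interleaving.
#[cfg(test)]
mod vst1_s32_x2_usage_sketch {
use super::*;
use stdarch_test::simd_test;
#[simd_test(enable = "neon")]
unsafe fn round_trips_two_registers() {
let src: [i32; 4] = [1, 2, 3, 4];
let pair: int32x2x2_t = vld1_s32_x2(src.as_ptr());
let mut out = [0i32; 4];
vst1_s32_x2(out.as_mut_ptr(), pair);
assert_eq!(out, src);
}
}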
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i8.v16i8")]
fn vst1q_s8_x2_(ptr: *mut i8, a: int8x16_t, b: int8x16_t);
}
vst1q_s8_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x2.v16i8.p0i8")]
fn vst1q_s8_x2_(a: int8x16_t, b: int8x16_t, ptr: *mut i8);
}
vst1q_s8_x2_(b.0, b.1, a)
}
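// Illustrative sketch (not part of the generated API): the q forms behave the
// same way on 128-bit registers, so `vst1q_s8_x2` writes 32 contiguous bytes.
// `vdupq_n_s8` is the splat intrinsic defined elsewhere in this module.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
unsafe fn example_vst1q_s8_x2() {
    let mut out = [0i8; 32];
    let b = int8x16x2_t(vdupq_n_s8(1), vdupq_n_s8(2));
    vst1q_s8_x2(out.as_mut_ptr(), b);
    assert_eq!(out[..16], [1i8; 16]);
    assert_eq!(out[16..], [2i8; 16]);
}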
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i16.v8i16")]
fn vst1q_s16_x2_(ptr: *mut i16, a: int16x8_t, b: int16x8_t);
}
vst1q_s16_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x2.v8i16.p0i16")]
fn vst1q_s16_x2_(a: int16x8_t, b: int16x8_t, ptr: *mut i16);
}
vst1q_s16_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i32.v4i32")]
fn vst1q_s32_x2_(ptr: *mut i32, a: int32x4_t, b: int32x4_t);
}
vst1q_s32_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x2.v4i32.p0i32")]
fn vst1q_s32_x2_(a: int32x4_t, b: int32x4_t, ptr: *mut i32);
}
vst1q_s32_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0i64.v2i64")]
fn vst1q_s64_x2_(ptr: *mut i64, a: int64x2_t, b: int64x2_t);
}
vst1q_s64_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x2.v2i64.p0i64")]
fn vst1q_s64_x2_(a: int64x2_t, b: int64x2_t, ptr: *mut i64);
}
vst1q_s64_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v8i8")]
fn vst1_s8_x3_(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t);
}
vst1_s8_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x3.v8i8.p0i8")]
fn vst1_s8_x3_(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8);
}
vst1_s8_x3_(b.0, b.1, b.2, a)
}
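// Illustrative sketch (not part of the generated API): the `_x3` forms extend
// the same contiguous layout to three registers, 24 bytes here.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
unsafe fn example_vst1_s8_x3() {
    let mut out = [0i8; 24];
    let b = int8x8x3_t(vdup_n_s8(1), vdup_n_s8(2), vdup_n_s8(3));
    vst1_s8_x3(out.as_mut_ptr(), b);
    assert_eq!(out[..8], [1i8; 8]);
    assert_eq!(out[8..16], [2i8; 8]);
    assert_eq!(out[16..], [3i8; 8]);
}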
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v4i16")]
fn vst1_s16_x3_(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t);
}
vst1_s16_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x3.v4i16.p0i16")]
fn vst1_s16_x3_(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i16);
}
vst1_s16_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v2i32")]
fn vst1_s32_x3_(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t);
}
vst1_s32_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x3.v2i32.p0i32")]
fn vst1_s32_x3_(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i32);
}
vst1_s32_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v1i64")]
fn vst1_s64_x3_(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t);
}
vst1_s64_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x3.v1i64.p0i64")]
fn vst1_s64_x3_(a: int64x1_t, b: int64x1_t, c: int64x1_t, ptr: *mut i64);
}
vst1_s64_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i8.v16i8")]
fn vst1q_s8_x3_(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t);
}
vst1q_s8_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x3.v16i8.p0i8")]
fn vst1q_s8_x3_(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8);
}
vst1q_s8_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i16.v8i16")]
fn vst1q_s16_x3_(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t);
}
vst1q_s16_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x3.v8i16.p0i16")]
fn vst1q_s16_x3_(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i16);
}
vst1q_s16_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i32.v4i32")]
fn vst1q_s32_x3_(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t);
}
vst1q_s32_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x3.v4i32.p0i32")]
fn vst1q_s32_x3_(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i32);
}
vst1q_s32_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0i64.v2i64")]
fn vst1q_s64_x3_(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t);
}
vst1q_s64_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x3.v2i64.p0i64")]
fn vst1q_s64_x3_(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i64);
}
vst1q_s64_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v8i8")]
fn vst1_s8_x4_(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t);
}
vst1_s8_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x4.v8i8.p0i8")]
fn vst1_s8_x4_(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, ptr: *mut i8);
}
vst1_s8_x4_(b.0, b.1, b.2, b.3, a)
}
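// Illustrative sketch (not part of the generated API): the `_x4` forms store
// all four registers of the tuple in order, 32 contiguous bytes here.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
unsafe fn example_vst1_s8_x4() {
    let mut out = [0i8; 32];
    let b = int8x8x4_t(vdup_n_s8(1), vdup_n_s8(2), vdup_n_s8(3), vdup_n_s8(4));
    vst1_s8_x4(out.as_mut_ptr(), b);
    assert_eq!(out[..8], [1i8; 8]);
    assert_eq!(out[24..], [4i8; 8]);
}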
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v4i16")]
fn vst1_s16_x4_(ptr: *mut i16, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t);
}
vst1_s16_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x4.v4i16.p0i16")]
fn vst1_s16_x4_(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, ptr: *mut i16);
}
vst1_s16_x4_(b.0, b.1, b.2, b.3, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v2i32")]
fn vst1_s32_x4_(ptr: *mut i32, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t);
}
vst1_s32_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x4.v2i32.p0i32")]
fn vst1_s32_x4_(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, ptr: *mut i32);
}
vst1_s32_x4_(b.0, b.1, b.2, b.3, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v1i64")]
fn vst1_s64_x4_(ptr: *mut i64, a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t);
}
vst1_s64_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x4.v1i64.p0i64")]
fn vst1_s64_x4_(a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, ptr: *mut i64);
}
vst1_s64_x4_(b.0, b.1, b.2, b.3, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i8.v16i8")]
fn vst1q_s8_x4_(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t);
}
vst1q_s8_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x4.v16i8.p0i8")]
fn vst1q_s8_x4_(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, ptr: *mut i8);
}
vst1q_s8_x4_(b.0, b.1, b.2, b.3, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i16.v8i16")]
fn vst1q_s16_x4_(ptr: *mut i16, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t);
}
vst1q_s16_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x4.v8i16.p0i16")]
fn vst1q_s16_x4_(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, ptr: *mut i16);
}
vst1q_s16_x4_(b.0, b.1, b.2, b.3, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i32.v4i32")]
fn vst1q_s32_x4_(ptr: *mut i32, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t);
}
vst1q_s32_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x4.v4i32.p0i32")]
fn vst1q_s32_x4_(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, ptr: *mut i32);
}
vst1q_s32_x4_(b.0, b.1, b.2, b.3, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0i64.v2i64")]
fn vst1q_s64_x4_(ptr: *mut i64, a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t);
}
vst1q_s64_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x4.v2i64.p0i64")]
fn vst1q_s64_x4_(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i64);
}
vst1q_s64_x4_(b.0, b.1, b.2, b.3, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) {
vst1_s8_x2(transmute(a), transmute(b))
}
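// The unsigned and polynomial wrappers in this stretch of the file all
// delegate to the signed implementations via `transmute`: a plain store is
// type-oblivious, so only the bit pattern of the registers matters.
// Illustrative sketch (not part of the generated API); `vdup_n_u8` is the
// splat intrinsic defined elsewhere in this module.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
unsafe fn example_vst1_u8_x2() {
    let mut out = [0u8; 16];
    let b = uint8x8x2_t(vdup_n_u8(0xaa), vdup_n_u8(0x55));
    vst1_u8_x2(out.as_mut_ptr(), b);
    assert_eq!(out[..8], [0xaau8; 8]);
    assert_eq!(out[8..], [0x55u8; 8]);
}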
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) {
vst1_s16_x2(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) {
vst1_s32_x2(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) {
vst1_s64_x2(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) {
vst1q_s8_x2(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) {
vst1q_s16_x2(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) {
vst1q_s32_x2(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) {
vst1q_s64_x2(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) {
vst1_s8_x3(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) {
vst1_s16_x3(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) {
vst1_s32_x3(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) {
vst1_s64_x3(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) {
vst1q_s8_x3(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) {
vst1q_s16_x3(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) {
vst1q_s32_x3(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) {
vst1q_s64_x3(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) {
vst1_s8_x4(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) {
vst1_s16_x4(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) {
vst1_s32_x4(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) {
vst1_s64_x4(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) {
vst1q_s8_x4(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) {
vst1q_s16_x4(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) {
vst1q_s32_x4(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) {
vst1q_s64_x4(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) {
vst1_s8_x2(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) {
vst1_s8_x3(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) {
vst1_s8_x4(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) {
vst1q_s8_x2(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) {
vst1q_s8_x3(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) {
vst1q_s8_x4(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) {
vst1_s16_x2(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) {
vst1_s16_x3(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) {
vst1_s16_x4(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x2)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) {
vst1q_s16_x2(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x3)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) {
vst1q_s16_x3(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x4)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) {
vst1q_s16_x4(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x2)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) {
vst1_s64_x2(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x3)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) {
vst1_s64_x3(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x4)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) {
vst1_s64_x4(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x2)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) {
vst1q_s64_x2(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x3)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) {
vst1q_s64_x3(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x4)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) {
vst1q_s64_x4(transmute(a), transmute(b))
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0f32.v2f32")]
fn vst1_f32_x2_(ptr: *mut f32, a: float32x2_t, b: float32x2_t);
}
vst1_f32_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x2.v2f32.p0f32")]
fn vst1_f32_x2_(a: float32x2_t, b: float32x2_t, ptr: *mut f32);
}
vst1_f32_x2_(b.0, b.1, a)
}
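// Illustrative sketch (not part of the generated API): the floating-point
// forms follow exactly the same contiguous layout. `vdup_n_f32` is the splat
// intrinsic defined elsewhere in this module.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
unsafe fn example_vst1_f32_x2() {
    let mut out = [0.0f32; 4];
    let b = float32x2x2_t(vdup_n_f32(1.5), vdup_n_f32(2.5));
    vst1_f32_x2(out.as_mut_ptr(), b);
    assert_eq!(out, [1.5, 1.5, 2.5, 2.5]);
}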
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x2.p0f32.v4f32")]
fn vst1q_f32_x2_(ptr: *mut f32, a: float32x4_t, b: float32x4_t);
}
vst1q_f32_x2_(a, b.0, b.1)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x2.v4f32.p0f32")]
fn vst1q_f32_x2_(a: float32x4_t, b: float32x4_t, ptr: *mut f32);
}
vst1q_f32_x2_(b.0, b.1, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0f32.v2f32")]
fn vst1_f32_x3_(ptr: *mut f32, a: float32x2_t, b: float32x2_t, c: float32x2_t);
}
vst1_f32_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x3.v2f32.p0f32")]
fn vst1_f32_x3_(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut f32);
}
vst1_f32_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x3.p0f32.v4f32")]
fn vst1q_f32_x3_(ptr: *mut f32, a: float32x4_t, b: float32x4_t, c: float32x4_t);
}
vst1q_f32_x3_(a, b.0, b.1, b.2)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x3.v4f32.p0f32")]
fn vst1q_f32_x3_(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut f32);
}
vst1q_f32_x3_(b.0, b.1, b.2, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v2f32")]
fn vst1_f32_x4_(ptr: *mut f32, a: float32x2_t, b: float32x2_t, c: float32x2_t, d: float32x2_t);
}
vst1_f32_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x4.v2f32.p0f32")]
fn vst1_f32_x4_(a: float32x2_t, b: float32x2_t, c: float32x2_t, d: float32x2_t, ptr: *mut f32);
}
vst1_f32_x4_(b.0, b.1, b.2, b.3, a)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst1))]
pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst1x4.p0f32.v4f32")]
fn vst1q_f32_x4_(ptr: *mut f32, a: float32x4_t, b: float32x4_t, c: float32x4_t, d: float32x4_t);
}
vst1q_f32_x4_(a, b.0, b.1, b.2, b.3)
}
/// Store multiple single-element structures from one, two, three, or four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st1x4.v4f32.p0f32")]
fn vst1q_f32_x4_(a: float32x4_t, b: float32x4_t, c: float32x4_t, d: float32x4_t, ptr: *mut f32);
}
vst1q_f32_x4_(b.0, b.1, b.2, b.3, a)
}
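// Hand-written illustrative sketch (not produced by the generator): the `_x3`
// and `_x4` forms extend the same contiguous store to three or four registers.
// Here `vst1_f32_x4` writes eight consecutive `f32`s from four two-lane
// registers. Names and values are illustrative assumptions; test builds only.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
unsafe fn example_vst1_f32_x4() {
    let quad = float32x2x4_t(
        transmute([1.0f32; 2]),
        transmute([2.0f32; 2]),
        transmute([3.0f32; 2]),
        transmute([4.0f32; 2]),
    );
    let mut out = [0.0f32; 8];
    // Registers are stored in order; lanes are never interleaved by vst1.
    vst1_f32_x4(out.as_mut_ptr(), quad);
    assert_eq!(out, [1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0]);
}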
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst2))]
pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v8i8")]
fn vst2_s8_(ptr: *mut i8, a: int8x8_t, b: int8x8_t, size: i32);
}
vst2_s8_(a as _, b.0, b.1, 1)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2.v8i8.p0i8")]
fn vst2_s8_(a: int8x8_t, b: int8x8_t, ptr: *mut i8);
}
vst2_s8_(b.0, b.1, a as _)
}
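// Hand-written illustrative sketch (not produced by the generator): unlike the
// vst1 family, `vst2` interleaves its two registers, writing element i of the
// first register followed by element i of the second. Buffer contents, the
// little-endian lane-order assumption, and the name are mine; test builds only.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
unsafe fn example_vst2_s8() {
    let evens: int8x8_t = transmute([0i8, 2, 4, 6, 8, 10, 12, 14]);
    let odds: int8x8_t = transmute([1i8, 3, 5, 7, 9, 11, 13, 15]);
    let mut out = [0i8; 16];
    // Pairs (evens[i], odds[i]) are emitted in lane order, re-interleaving
    // the two vectors into one contiguous sequence.
    vst2_s8(out.as_mut_ptr(), int8x8x2_t(evens, odds));
    assert_eq!(out, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
}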
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst2))]
pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4i16")]
fn vst2_s16_(ptr: *mut i8, a: int16x4_t, b: int16x4_t, size: i32);
}
vst2_s16_(a as _, b.0, b.1, 2)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2.v4i16.p0i8")]
fn vst2_s16_(a: int16x4_t, b: int16x4_t, ptr: *mut i8);
}
vst2_s16_(b.0, b.1, a as _)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst2))]
pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v2i32")]
fn vst2_s32_(ptr: *mut i8, a: int32x2_t, b: int32x2_t, size: i32);
}
vst2_s32_(a as _, b.0, b.1, 4)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2.v2i32.p0i8")]
fn vst2_s32_(a: int32x2_t, b: int32x2_t, ptr: *mut i8);
}
vst2_s32_(b.0, b.1, a as _)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst2))]
pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v16i8")]
fn vst2q_s8_(ptr: *mut i8, a: int8x16_t, b: int8x16_t, size: i32);
}
vst2q_s8_(a as _, b.0, b.1, 1)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2.v16i8.p0i8")]
fn vst2q_s8_(a: int8x16_t, b: int8x16_t, ptr: *mut i8);
}
vst2q_s8_(b.0, b.1, a as _)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst2))]
pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v8i16")]
fn vst2q_s16_(ptr: *mut i8, a: int16x8_t, b: int16x8_t, size: i32);
}
vst2q_s16_(a as _, b.0, b.1, 2)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2.v8i16.p0i8")]
fn vst2q_s16_(a: int16x8_t, b: int16x8_t, ptr: *mut i8);
}
vst2q_s16_(b.0, b.1, a as _)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst2))]
pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4i32")]
fn vst2q_s32_(ptr: *mut i8, a: int32x4_t, b: int32x4_t, size: i32);
}
vst2q_s32_(a as _, b.0, b.1, 4)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2.v4i32.p0i8")]
fn vst2q_s32_(a: int32x4_t, b: int32x4_t, ptr: *mut i8);
}
vst2q_s32_(b.0, b.1, a as _)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v1i64")]
fn vst2_s64_(ptr: *mut i8, a: int64x1_t, b: int64x1_t, size: i32);
}
vst2_s64_(a as _, b.0, b.1, 8)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2.v1i64.p0i8")]
fn vst2_s64_(a: int64x1_t, b: int64x1_t, ptr: *mut i8);
}
vst2_s64_(b.0, b.1, a as _)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) {
transmute(vst2_s8(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) {
transmute(vst2_s16(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) {
transmute(vst2_s32(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) {
transmute(vst2q_s8(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) {
transmute(vst2q_s16(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) {
transmute(vst2q_s32(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) {
transmute(vst2_s8(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) {
transmute(vst2_s16(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) {
transmute(vst2q_s8(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) {
transmute(vst2q_s16(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) {
transmute(vst2_s64(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) {
transmute(vst2_s64(transmute(a), transmute(b)))
}
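// Hand-written illustrative sketch (not produced by the generator): as the
// bodies above show, the unsigned and polynomial variants are thin
// `transmute` wrappers over the signed forms, so they interleave identically.
// Values and the example name are assumptions; compiled for tests only.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
unsafe fn example_vst2_u32() {
    let xs: uint32x2_t = transmute([1u32, 2]);
    let ys: uint32x2_t = transmute([10u32, 20]);
    let mut out = [0u32; 4];
    // Same interleaving as vst2_s32: xs[0], ys[0], xs[1], ys[1].
    vst2_u32(out.as_mut_ptr(), uint32x2x2_t(xs, ys));
    assert_eq!(out, [1, 10, 2, 20]);
}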
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst2))]
pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v2f32")]
fn vst2_f32_(ptr: *mut i8, a: float32x2_t, b: float32x2_t, size: i32);
}
vst2_f32_(a as _, b.0, b.1, 4)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2.v2f32.p0i8")]
fn vst2_f32_(a: float32x2_t, b: float32x2_t, ptr: *mut i8);
}
vst2_f32_(b.0, b.1, a as _)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst2))]
pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2.p0i8.v4f32")]
fn vst2q_f32_(ptr: *mut i8, a: float32x4_t, b: float32x4_t, size: i32);
}
vst2q_f32_(a as _, b.0, b.1, 4)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2.v4f32.p0i8")]
fn vst2q_f32_(a: float32x4_t, b: float32x4_t, ptr: *mut i8);
}
vst2q_f32_(b.0, b.1, a as _)
}
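// Hand-written illustrative sketch (not produced by the generator): a common
// use of the floating-point vst2 is writing separately processed planes back
// out as pairs, e.g. split real/imaginary parts re-interleaved as (re, im).
// The scenario and values are illustrative assumptions; test builds only.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
unsafe fn example_vst2_f32() {
    let re: float32x2_t = transmute([0.5f32, 1.5]);
    let im: float32x2_t = transmute([-0.5f32, -1.5]);
    let mut out = [0.0f32; 4];
    vst2_f32(out.as_mut_ptr(), float32x2x2_t(re, im));
    // Two (re, im) pairs, one per source lane.
    assert_eq!(out, [0.5, -0.5, 1.5, -1.5]);
}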
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst2_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x2_t) {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i8")]
fn vst2_lane_s8_(ptr: *mut i8, a: int8x8_t, b: int8x8_t, n: i32, size: i32);
}
vst2_lane_s8_(a as _, b.0, b.1, LANE, 1)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x2_t) {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v8i8.p0i8")]
fn vst2_lane_s8_(a: int8x8_t, b: int8x8_t, n: i64, ptr: *mut i8);
}
vst2_lane_s8_(b.0, b.1, LANE as i64, a as _)
}
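// Hand-written illustrative sketch (not produced by the generator): the
// `_lane` forms store a single 2-element structure, taken from lane `LANE` of
// each register, rather than the whole vectors. The lane index and values
// below are illustrative assumptions; the function is compiled for tests only.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
unsafe fn example_vst2_lane_s8() {
    let a: int8x8_t = transmute([10i8, 11, 12, 13, 14, 15, 16, 17]);
    let b: int8x8_t = transmute([20i8, 21, 22, 23, 24, 25, 26, 27]);
    let mut out = [0i8; 2];
    // Writes exactly two bytes: lane 3 of `a`, then lane 3 of `b`.
    vst2_lane_s8::<3>(out.as_mut_ptr(), int8x8x2_t(a, b));
    assert_eq!(out, [13, 23]);
}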
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst2_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x2_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i16")]
fn vst2_lane_s16_(ptr: *mut i8, a: int16x4_t, b: int16x4_t, n: i32, size: i32);
}
vst2_lane_s16_(a as _, b.0, b.1, LANE, 2)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x2_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v4i16.p0i8")]
fn vst2_lane_s16_(a: int16x4_t, b: int16x4_t, n: i64, ptr: *mut i8);
}
vst2_lane_s16_(b.0, b.1, LANE as i64, a as _)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst2_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x2_t) {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2i32")]
fn vst2_lane_s32_(ptr: *mut i8, a: int32x2_t, b: int32x2_t, n: i32, size: i32);
}
vst2_lane_s32_(a as _, b.0, b.1, LANE, 4)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x2_t) {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v2i32.p0i8")]
fn vst2_lane_s32_(a: int32x2_t, b: int32x2_t, n: i64, ptr: *mut i8);
}
vst2_lane_s32_(b.0, b.1, LANE as i64, a as _)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst2q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x2_t) {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i16")]
fn vst2q_lane_s16_(ptr: *mut i8, a: int16x8_t, b: int16x8_t, n: i32, size: i32);
}
vst2q_lane_s16_(a as _, b.0, b.1, LANE, 2)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x2_t) {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v8i16.p0i8")]
fn vst2q_lane_s16_(a: int16x8_t, b: int16x8_t, n: i64, ptr: *mut i8);
}
vst2q_lane_s16_(b.0, b.1, LANE as i64, a as _)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst2q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x2_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i32")]
fn vst2q_lane_s32_(ptr: *mut i8, a: int32x4_t, b: int32x4_t, n: i32, size: i32);
}
vst2q_lane_s32_(a as _, b.0, b.1, LANE, 4)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x2_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v4i32.p0i8")]
fn vst2q_lane_s32_(a: int32x4_t, b: int32x4_t, n: i64, ptr: *mut i8);
}
vst2q_lane_s32_(b.0, b.1, LANE as i64, a as _)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x2_t) {
static_assert_imm3!(LANE);
transmute(vst2_lane_s8::<LANE>(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x2_t) {
static_assert_imm2!(LANE);
transmute(vst2_lane_s16::<LANE>(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x2_t) {
static_assert_imm1!(LANE);
transmute(vst2_lane_s32::<LANE>(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x2_t) {
static_assert_imm3!(LANE);
transmute(vst2q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x2_t) {
static_assert_imm2!(LANE);
transmute(vst2q_lane_s32::<LANE>(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x2_t) {
static_assert_imm3!(LANE);
transmute(vst2_lane_s8::<LANE>(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x2_t) {
static_assert_imm2!(LANE);
transmute(vst2_lane_s16::<LANE>(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x2_t) {
static_assert_imm3!(LANE);
transmute(vst2q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst2_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x2_t) {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2f32")]
fn vst2_lane_f32_(ptr: *mut i8, a: float32x2_t, b: float32x2_t, n: i32, size: i32);
}
vst2_lane_f32_(a as _, b.0, b.1, LANE, 4)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x2_t) {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v2f32.p0i8")]
fn vst2_lane_f32_(a: float32x2_t, b: float32x2_t, n: i64, ptr: *mut i8);
}
vst2_lane_f32_(b.0, b.1, LANE as i64, a as _)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst2q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x2_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4f32")]
fn vst2q_lane_f32_(ptr: *mut i8, a: float32x4_t, b: float32x4_t, n: i32, size: i32);
}
vst2q_lane_f32_(a as _, b.0, b.1, LANE, 4)
}
/// Store multiple 2-element structures from two registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x2_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v4f32.p0i8")]
fn vst2q_lane_f32_(a: float32x4_t, b: float32x4_t, n: i64, ptr: *mut i8);
}
vst2q_lane_f32_(b.0, b.1, LANE as i64, a as _)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst3))]
pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i8")]
fn vst3_s8_(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, size: i32);
}
vst3_s8_(a as _, b.0, b.1, b.2, 1)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3.v8i8.p0i8")]
fn vst3_s8_(a: int8x8_t, b: int8x8_t, c: int8x8_t, ptr: *mut i8);
}
vst3_s8_(b.0, b.1, b.2, a as _)
}
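// Hand-written illustrative sketch (not produced by the generator): `vst3`
// interleaves three registers, the classic case being separate R, G, and B
// planes written back as packed RGB triples. The pixel framing and values are
// illustrative assumptions; the function is compiled for tests only.
#[cfg(all(test, target_arch = "aarch64"))]
#[allow(dead_code)]
unsafe fn example_vst3_s8() {
    let r: int8x8_t = transmute([1i8; 8]);
    let g: int8x8_t = transmute([2i8; 8]);
    let b: int8x8_t = transmute([3i8; 8]);
    let mut out = [0i8; 24];
    vst3_s8(out.as_mut_ptr(), int8x8x3_t(r, g, b));
    // Eight (r, g, b) triples, one per source lane.
    for rgb in out.chunks_exact(3) {
        assert_eq!(rgb, [1, 2, 3]);
    }
}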
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst3))]
pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i16")]
fn vst3_s16_(ptr: *mut i8, a: int16x4_t, b: int16x4_t, c: int16x4_t, size: i32);
}
vst3_s16_(a as _, b.0, b.1, b.2, 2)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3.v4i16.p0i8")]
fn vst3_s16_(a: int16x4_t, b: int16x4_t, c: int16x4_t, ptr: *mut i8);
}
vst3_s16_(b.0, b.1, b.2, a as _)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst3))]
pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2i32")]
fn vst3_s32_(ptr: *mut i8, a: int32x2_t, b: int32x2_t, c: int32x2_t, size: i32);
}
vst3_s32_(a as _, b.0, b.1, b.2, 4)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3.v2i32.p0i8")]
fn vst3_s32_(a: int32x2_t, b: int32x2_t, c: int32x2_t, ptr: *mut i8);
}
vst3_s32_(b.0, b.1, b.2, a as _)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst3))]
pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v16i8")]
fn vst3q_s8_(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, size: i32);
}
vst3q_s8_(a as _, b.0, b.1, b.2, 1)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3.v16i8.p0i8")]
fn vst3q_s8_(a: int8x16_t, b: int8x16_t, c: int8x16_t, ptr: *mut i8);
}
vst3q_s8_(b.0, b.1, b.2, a as _)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst3))]
pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v8i16")]
fn vst3q_s16_(ptr: *mut i8, a: int16x8_t, b: int16x8_t, c: int16x8_t, size: i32);
}
vst3q_s16_(a as _, b.0, b.1, b.2, 2)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3.v8i16.p0i8")]
fn vst3q_s16_(a: int16x8_t, b: int16x8_t, c: int16x8_t, ptr: *mut i8);
}
vst3q_s16_(b.0, b.1, b.2, a as _)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst3))]
pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4i32")]
fn vst3q_s32_(ptr: *mut i8, a: int32x4_t, b: int32x4_t, c: int32x4_t, size: i32);
}
vst3q_s32_(a as _, b.0, b.1, b.2, 4)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3.v4i32.p0i8")]
fn vst3q_s32_(a: int32x4_t, b: int32x4_t, c: int32x4_t, ptr: *mut i8);
}
vst3q_s32_(b.0, b.1, b.2, a as _)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v1i64")]
fn vst3_s64_(ptr: *mut i8, a: int64x1_t, b: int64x1_t, c: int64x1_t, size: i32);
}
vst3_s64_(a as _, b.0, b.1, b.2, 8)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3.v1i64.p0i8")]
fn vst3_s64_(a: int64x1_t, b: int64x1_t, c: int64x1_t, ptr: *mut i8);
}
vst3_s64_(b.0, b.1, b.2, a as _)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) {
transmute(vst3_s8(transmute(a), transmute(b)))
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) {
transmute(vst3_s16(transmute(a), transmute(b)))
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) {
transmute(vst3_s32(transmute(a), transmute(b)))
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) {
transmute(vst3q_s8(transmute(a), transmute(b)))
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) {
transmute(vst3q_s16(transmute(a), transmute(b)))
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) {
transmute(vst3q_s32(transmute(a), transmute(b)))
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) {
transmute(vst3_s8(transmute(a), transmute(b)))
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) {
transmute(vst3_s16(transmute(a), transmute(b)))
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) {
transmute(vst3q_s8(transmute(a), transmute(b)))
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) {
transmute(vst3q_s16(transmute(a), transmute(b)))
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) {
transmute(vst3_s64(transmute(a), transmute(b)))
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) {
transmute(vst3_s64(transmute(a), transmute(b)))
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst3))]
pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v2f32")]
fn vst3_f32_(ptr: *mut i8, a: float32x2_t, b: float32x2_t, c: float32x2_t, size: i32);
}
vst3_f32_(a as _, b.0, b.1, b.2, 4)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3.v2f32.p0i8")]
fn vst3_f32_(a: float32x2_t, b: float32x2_t, c: float32x2_t, ptr: *mut i8);
}
vst3_f32_(b.0, b.1, b.2, a as _)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst3))]
pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3.p0i8.v4f32")]
fn vst3q_f32_(ptr: *mut i8, a: float32x4_t, b: float32x4_t, c: float32x4_t, size: i32);
}
vst3q_f32_(a as _, b.0, b.1, b.2, 4)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3.v4f32.p0i8")]
fn vst3q_f32_(a: float32x4_t, b: float32x4_t, c: float32x4_t, ptr: *mut i8);
}
vst3q_f32_(b.0, b.1, b.2, a as _)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x3_t) {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i8")]
fn vst3_lane_s8_(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i32, size: i32);
}
vst3_lane_s8_(a as _, b.0, b.1, b.2, LANE, 1)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x3_t) {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v8i8.p0i8")]
fn vst3_lane_s8_(a: int8x8_t, b: int8x8_t, c: int8x8_t, n: i64, ptr: *mut i8);
}
vst3_lane_s8_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x3_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i16")]
fn vst3_lane_s16_(ptr: *mut i8, a: int16x4_t, b: int16x4_t, c: int16x4_t, n: i32, size: i32);
}
vst3_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
}
/// Store multiple 3-element structures from three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x3_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v4i16.p0i8")]
fn vst3_lane_s16_(a: int16x4_t, b: int16x4_t, c: int16x4_t, n: i64, ptr: *mut i8);
}
vst3_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x3_t) {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2i32")]
fn vst3_lane_s32_(ptr: *mut i8, a: int32x2_t, b: int32x2_t, c: int32x2_t, n: i32, size: i32);
}
vst3_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x3_t) {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v2i32.p0i8")]
fn vst3_lane_s32_(a: int32x2_t, b: int32x2_t, c: int32x2_t, n: i64, ptr: *mut i8);
}
vst3_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x3_t) {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i16")]
fn vst3q_lane_s16_(ptr: *mut i8, a: int16x8_t, b: int16x8_t, c: int16x8_t, n: i32, size: i32);
}
vst3q_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x3_t) {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v8i16.p0i8")]
fn vst3q_lane_s16_(a: int16x8_t, b: int16x8_t, c: int16x8_t, n: i64, ptr: *mut i8);
}
vst3q_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x3_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i32")]
fn vst3q_lane_s32_(ptr: *mut i8, a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i32, size: i32);
}
vst3q_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x3_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v4i32.p0i8")]
fn vst3q_lane_s32_(a: int32x4_t, b: int32x4_t, c: int32x4_t, n: i64, ptr: *mut i8);
}
vst3q_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x3_t) {
static_assert_imm3!(LANE);
vst3_lane_s8::<LANE>(transmute(a), transmute(b))
}
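// The unsigned and polynomial lane stores reuse the signed implementation via
// bit-preserving transmutes, so they emit the same instruction and write the
// same bytes. A sketch of that equivalence (illustrative, assumes aarch64):
#[cfg(all(test, target_arch = "aarch64"))]
mod vst3_lane_u8_usage_sketch {
    use super::*;

    #[test]
    fn unsigned_store_is_a_raw_byte_store() {
        unsafe {
            // 0x80 is -128 as i8; the transmute-based forwarding keeps the bits.
            let v = vld1_u8([0x80u8; 8].as_ptr());
            let mut out = [0u8; 3];
            vst3_lane_u8::<0>(out.as_mut_ptr(), uint8x8x3_t(v, v, v));
            assert_eq!(out, [0x80, 0x80, 0x80]);
        }
    }
}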
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x3_t) {
static_assert_imm2!(LANE);
vst3_lane_s16::<LANE>(transmute(a), transmute(b))
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x3_t) {
static_assert_imm1!(LANE);
vst3_lane_s32::<LANE>(transmute(a), transmute(b))
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x3_t) {
static_assert_imm3!(LANE);
vst3q_lane_s16::<LANE>(transmute(a), transmute(b))
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x3_t) {
static_assert_imm2!(LANE);
vst3q_lane_s32::<LANE>(transmute(a), transmute(b))
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x3_t) {
static_assert_imm3!(LANE);
vst3_lane_s8::<LANE>(transmute(a), transmute(b))
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x3_t) {
static_assert_imm2!(LANE);
vst3_lane_s16::<LANE>(transmute(a), transmute(b))
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x3_t) {
static_assert_imm3!(LANE);
vst3q_lane_s16::<LANE>(transmute(a), transmute(b))
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x3_t) {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2f32")]
fn vst3_lane_f32_(ptr: *mut i8, a: float32x2_t, b: float32x2_t, c: float32x2_t, n: i32, size: i32);
}
vst3_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x3_t) {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v2f32.p0i8")]
fn vst3_lane_f32_(a: float32x2_t, b: float32x2_t, c: float32x2_t, n: i64, ptr: *mut i8);
}
vst3_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x3_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4f32")]
fn vst3q_lane_f32_(ptr: *mut i8, a: float32x4_t, b: float32x4_t, c: float32x4_t, n: i32, size: i32);
}
vst3q_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
}
/// Store a single 3-element structure from one lane of three registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x3_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v4f32.p0i8")]
fn vst3q_lane_f32_(a: float32x4_t, b: float32x4_t, c: float32x4_t, n: i64, ptr: *mut i8);
}
vst3q_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _)
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst4))]
pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i8")]
fn vst4_s8_(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, size: i32);
}
vst4_s8_(a as _, b.0, b.1, b.2, b.3, 1)
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4.v8i8.p0i8")]
fn vst4_s8_(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, ptr: *mut i8);
}
vst4_s8_(b.0, b.1, b.2, b.3, a as _)
}
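// A usage sketch (illustrative, not generated): st4 interleaves four
// registers lane by lane, so each group of four output bytes is one lane
// taken across the registers. Assumes an aarch64 target.
#[cfg(all(test, target_arch = "aarch64"))]
mod vst4_s8_usage_sketch {
    use super::*;

    #[test]
    fn st4_interleaves_four_registers() {
        unsafe {
            let regs = int8x8x4_t(vdup_n_s8(1), vdup_n_s8(2), vdup_n_s8(3), vdup_n_s8(4));
            let mut out = [0i8; 32];
            vst4_s8(out.as_mut_ptr(), regs);
            // First and last interleaved groups.
            assert_eq!(&out[..4], [1, 2, 3, 4]);
            assert_eq!(&out[28..], [1, 2, 3, 4]);
        }
    }
}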
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst4))]
pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i16")]
fn vst4_s16_(ptr: *mut i8, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, size: i32);
}
vst4_s16_(a as _, b.0, b.1, b.2, b.3, 2)
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4.v4i16.p0i8")]
fn vst4_s16_(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, ptr: *mut i8);
}
vst4_s16_(b.0, b.1, b.2, b.3, a as _)
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst4))]
pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2i32")]
fn vst4_s32_(ptr: *mut i8, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, size: i32);
}
vst4_s32_(a as _, b.0, b.1, b.2, b.3, 4)
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4.v2i32.p0i8")]
fn vst4_s32_(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, ptr: *mut i8);
}
vst4_s32_(b.0, b.1, b.2, b.3, a as _)
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst4))]
pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v16i8")]
fn vst4q_s8_(ptr: *mut i8, a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, size: i32);
}
vst4q_s8_(a as _, b.0, b.1, b.2, b.3, 1)
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4.v16i8.p0i8")]
fn vst4q_s8_(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, ptr: *mut i8);
}
vst4q_s8_(b.0, b.1, b.2, b.3, a as _)
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst4))]
pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v8i16")]
fn vst4q_s16_(ptr: *mut i8, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, size: i32);
}
vst4q_s16_(a as _, b.0, b.1, b.2, b.3, 2)
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4.v8i16.p0i8")]
fn vst4q_s16_(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, ptr: *mut i8);
}
vst4q_s16_(b.0, b.1, b.2, b.3, a as _)
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst4))]
pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4i32")]
fn vst4q_s32_(ptr: *mut i8, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, size: i32);
}
vst4q_s32_(a as _, b.0, b.1, b.2, b.3, 4)
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4.v4i32.p0i8")]
fn vst4q_s32_(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, ptr: *mut i8);
}
vst4q_s32_(b.0, b.1, b.2, b.3, a as _)
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v1i64")]
fn vst4_s64_(ptr: *mut i8, a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, size: i32);
}
vst4_s64_(a as _, b.0, b.1, b.2, b.3, 8)
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4.v1i64.p0i8")]
fn vst4_s64_(a: int64x1_t, b: int64x1_t, c: int64x1_t, d: int64x1_t, ptr: *mut i8);
}
vst4_s64_(b.0, b.1, b.2, b.3, a as _)
}
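// With one 64-bit element per register there is nothing to interleave, which
// is why the assertion above expects no st4 instruction: the store decays to
// four sequential stores. An illustrative sketch (assumes aarch64):
#[cfg(all(test, target_arch = "aarch64"))]
mod vst4_s64_usage_sketch {
    use super::*;

    #[test]
    fn single_element_interleave_is_sequential() {
        unsafe {
            let regs = int64x1x4_t(vdup_n_s64(-1), vdup_n_s64(-2), vdup_n_s64(-3), vdup_n_s64(-4));
            let mut out = [0i64; 4];
            vst4_s64(out.as_mut_ptr(), regs);
            assert_eq!(out, [-1, -2, -3, -4]);
        }
    }
}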
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) {
vst4_s8(transmute(a), transmute(b))
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) {
vst4_s16(transmute(a), transmute(b))
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) {
vst4_s32(transmute(a), transmute(b))
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) {
vst4q_s8(transmute(a), transmute(b))
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) {
vst4q_s16(transmute(a), transmute(b))
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) {
vst4q_s32(transmute(a), transmute(b))
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) {
vst4_s8(transmute(a), transmute(b))
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) {
vst4_s16(transmute(a), transmute(b))
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) {
vst4q_s8(transmute(a), transmute(b))
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) {
vst4q_s16(transmute(a), transmute(b))
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) {
vst4_s64(transmute(a), transmute(b))
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) {
vst4_s64(transmute(a), transmute(b))
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst4))]
pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v2f32")]
fn vst4_f32_(ptr: *mut i8, a: float32x2_t, b: float32x2_t, c: float32x2_t, d: float32x2_t, size: i32);
}
vst4_f32_(a as _, b.0, b.1, b.2, b.3, 4)
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4.v2f32.p0i8")]
fn vst4_f32_(a: float32x2_t, b: float32x2_t, c: float32x2_t, d: float32x2_t, ptr: *mut i8);
}
vst4_f32_(b.0, b.1, b.2, b.3, a as _)
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst4))]
pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4.p0i8.v4f32")]
fn vst4q_f32_(ptr: *mut i8, a: float32x4_t, b: float32x4_t, c: float32x4_t, d: float32x4_t, size: i32);
}
vst4q_f32_(a as _, b.0, b.1, b.2, b.3, 4)
}
/// Store multiple 4-element structures from four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4.v4f32.p0i8")]
fn vst4q_f32_(a: float32x4_t, b: float32x4_t, c: float32x4_t, d: float32x4_t, ptr: *mut i8);
}
vst4q_f32_(b.0, b.1, b.2, b.3, a as _)
}
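// vld4q_f32 performs the inverse de-interleave of this store, so a
// load-then-store round trip reproduces the source buffer. An illustrative
// sketch of that pairing (not generated; assumes aarch64):
#[cfg(all(test, target_arch = "aarch64"))]
mod vst4q_f32_usage_sketch {
    use super::*;

    #[test]
    fn ld4_then_st4_round_trips() {
        unsafe {
            let src: [f32; 16] = [
                0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0,
                8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
            ];
            let regs = vld4q_f32(src.as_ptr()); // de-interleave into four registers
            let mut out = [0.0f32; 16];
            vst4q_f32(out.as_mut_ptr(), regs); // re-interleave
            assert_eq!(out, src);
        }
    }
}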
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x4_t) {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i8")]
fn vst4_lane_s8_(ptr: *mut i8, a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, n: i32, size: i32);
}
vst4_lane_s8_(a as _, b.0, b.1, b.2, b.3, LANE, 1)
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x4_t) {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v8i8.p0i8")]
fn vst4_lane_s8_(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, n: i64, ptr: *mut i8);
}
vst4_lane_s8_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x4_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i16")]
fn vst4_lane_s16_(ptr: *mut i8, a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, n: i32, size: i32);
}
vst4_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x4_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v4i16.p0i8")]
fn vst4_lane_s16_(a: int16x4_t, b: int16x4_t, c: int16x4_t, d: int16x4_t, n: i64, ptr: *mut i8);
}
vst4_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x4_t) {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2i32")]
fn vst4_lane_s32_(ptr: *mut i8, a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, n: i32, size: i32);
}
vst4_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x4_t) {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v2i32.p0i8")]
fn vst4_lane_s32_(a: int32x2_t, b: int32x2_t, c: int32x2_t, d: int32x2_t, n: i64, ptr: *mut i8);
}
vst4_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x4_t) {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i16")]
fn vst4q_lane_s16_(ptr: *mut i8, a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, n: i32, size: i32);
}
vst4q_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x4_t) {
static_assert_imm3!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v8i16.p0i8")]
fn vst4q_lane_s16_(a: int16x8_t, b: int16x8_t, c: int16x8_t, d: int16x8_t, n: i64, ptr: *mut i8);
}
vst4q_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x4_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i32")]
fn vst4q_lane_s32_(ptr: *mut i8, a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, n: i32, size: i32);
}
vst4q_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x4_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v4i32.p0i8")]
fn vst4q_lane_s32_(a: int32x4_t, b: int32x4_t, c: int32x4_t, d: int32x4_t, n: i64, ptr: *mut i8);
}
vst4q_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x4_t) {
static_assert_imm3!(LANE);
vst4_lane_s8::<LANE>(transmute(a), transmute(b))
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x4_t) {
static_assert_imm2!(LANE);
vst4_lane_s16::<LANE>(transmute(a), transmute(b))
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x4_t) {
static_assert_imm1!(LANE);
vst4_lane_s32::<LANE>(transmute(a), transmute(b))
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x4_t) {
static_assert_imm3!(LANE);
vst4q_lane_s16::<LANE>(transmute(a), transmute(b))
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x4_t) {
static_assert_imm2!(LANE);
vst4q_lane_s32::<LANE>(transmute(a), transmute(b))
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x4_t) {
static_assert_imm3!(LANE);
vst4_lane_s8::<LANE>(transmute(a), transmute(b))
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x4_t) {
static_assert_imm2!(LANE);
vst4_lane_s16::<LANE>(transmute(a), transmute(b))
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x4_t) {
static_assert_imm3!(LANE);
vst4q_lane_s16::<LANE>(transmute(a), transmute(b))
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x4_t) {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2f32")]
fn vst4_lane_f32_(ptr: *mut i8, a: float32x2_t, b: float32x2_t, c: float32x2_t, d: float32x2_t, n: i32, size: i32);
}
vst4_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x4_t) {
static_assert_imm1!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v2f32.p0i8")]
fn vst4_lane_f32_(a: float32x2_t, b: float32x2_t, c: float32x2_t, d: float32x2_t, n: i64, ptr: *mut i8);
}
vst4_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x4_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4f32")]
fn vst4q_lane_f32_(ptr: *mut i8, a: float32x4_t, b: float32x4_t, c: float32x4_t, d: float32x4_t, n: i32, size: i32);
}
vst4q_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
}
/// Store a single 4-element structure from one lane of four registers
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x4_t) {
static_assert_imm2!(LANE);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v4f32.p0i8")]
fn vst4q_lane_f32_(a: float32x4_t, b: float32x4_t, c: float32x4_t, d: float32x4_t, n: i64, ptr: *mut i8);
}
vst4q_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
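// A usage sketch for the lane variant (illustrative, not generated): exactly
// four floats are written, lane LANE of each register in register order.
// Assumes an aarch64 target.
#[cfg(all(test, target_arch = "aarch64"))]
mod vst4q_lane_f32_usage_sketch {
    use super::*;

    #[test]
    fn stores_one_lane_from_each_register() {
        unsafe {
            let regs = float32x4x4_t(
                vdupq_n_f32(1.0),
                vdupq_n_f32(2.0),
                vdupq_n_f32(3.0),
                vdupq_n_f32(4.0),
            );
            let mut out = [0.0f32; 4];
            vst4q_lane_f32::<3>(out.as_mut_ptr(), regs);
            assert_eq!(out, [1.0, 2.0, 3.0, 4.0]);
        }
    }
}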
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
simd_mul(a, b)
}
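// simd_mul is a lane-wise wrapping multiply; overflow truncates modulo the
// lane width rather than saturating. An illustrative sketch (assumes aarch64):
#[cfg(all(test, target_arch = "aarch64"))]
mod vmul_s8_usage_sketch {
    use super::*;

    #[test]
    fn lane_wise_multiply_wraps() {
        unsafe {
            let mut out = [0i8; 8];
            // 16 * 16 = 256, which wraps to 0 in every 8-bit lane.
            vst1_s8(out.as_mut_ptr(), vmul_s8(vdup_n_s8(16), vdup_n_s8(16)));
            assert_eq!(out, [0i8; 8]);
        }
    }
}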
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
simd_mul(a, b)
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
simd_mul(a, b)
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
simd_mul(a, b)
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
simd_mul(a, b)
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
simd_mul(a, b)
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
simd_mul(a, b)
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
simd_mul(a, b)
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
simd_mul(a, b)
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
simd_mul(a, b)
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
simd_mul(a, b)
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
simd_mul(a, b)
}
/// Polynomial multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.pmul.v8i8")]
fn vmul_p8_(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t;
}
vmul_p8_(a, b)
}
/// Polynomial multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulp.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.pmul.v16i8")]
fn vmulq_p8_(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t;
}
vmulq_p8_(a, b)
}
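// Polynomial multiply treats each lane as a polynomial over GF(2): partial
// products are XORed instead of added, so there are no carries. A sketch of
// the carry-less behaviour (illustrative, assumes aarch64):
#[cfg(all(test, target_arch = "aarch64"))]
mod vmul_p8_usage_sketch {
    use super::*;

    #[test]
    fn carry_less_multiply() {
        unsafe {
            // (x + 1) * (x + 1) = x^2 + 1 over GF(2), i.e. 0b11 * 0b11 = 0b101.
            let mut out = [0u8; 8];
            vst1_p8(out.as_mut_ptr(), vmul_p8(vdup_n_p8(0b11), vdup_n_p8(0b11)));
            assert_eq!(out, [0b101u8; 8]);
        }
    }
}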
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
simd_mul(a, b)
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
simd_mul(a, b)
}
/// Vector multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t {
simd_mul(a, vdup_n_s16(b))
}
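// Editor's sketch (not generated): the `_n` forms splat the scalar with vdup
// and then perform an ordinary lane-wise multiply, as the body above shows.
// Compile-only; the helper name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn vmul_n_s16_sketch() {
let a: int16x4_t = transmute([1i16, 2, 3, 4]);
let r: [i16; 4] = transmute(vmul_n_s16(a, 2)); // every lane times 2
assert_eq!(r, [2, 4, 6, 8]);
}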
/// Vector multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t {
simd_mul(a, vdupq_n_s16(b))
}
/// Vector multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t {
simd_mul(a, vdup_n_s32(b))
}
/// Vector multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t {
simd_mul(a, vdupq_n_s32(b))
}
/// Vector multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t {
simd_mul(a, vdup_n_u16(b))
}
/// Vector multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t {
simd_mul(a, vdupq_n_u16(b))
}
/// Vector multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t {
simd_mul(a, vdup_n_u32(b))
}
/// Vector multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t {
simd_mul(a, vdupq_n_u32(b))
}
/// Vector multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t {
simd_mul(a, vdup_n_f32(b))
}
/// Vector multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t {
simd_mul(a, vdupq_n_f32(b))
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
static_assert_imm2!(LANE);
simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
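// Editor's sketch (not generated): the `_lane` forms broadcast lane LANE of
// `b` across a vector (the shuffle above) and multiply `a` by it.
// Compile-only; the helper name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn vmul_lane_s16_sketch() {
let a: int16x4_t = transmute([1i16, 2, 3, 4]);
let b: int16x4_t = transmute([10i16, 20, 30, 40]);
let r: [i16; 4] = transmute(vmul_lane_s16::<2>(a, b)); // lane 2 of b is 30
assert_eq!(r, [30, 60, 90, 120]);
}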
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t {
static_assert_imm3!(LANE);
simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
static_assert_imm2!(LANE);
simd_mul(a, simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
static_assert_imm3!(LANE);
simd_mul(a, simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
static_assert_imm1!(LANE);
simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t {
static_assert_imm2!(LANE);
simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
static_assert_imm1!(LANE);
simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
static_assert_imm2!(LANE);
simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
static_assert_imm2!(LANE);
simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t {
static_assert_imm3!(LANE);
simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t {
static_assert_imm2!(LANE);
simd_mul(a, simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
static_assert_imm3!(LANE);
simd_mul(a, simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
static_assert_imm1!(LANE);
simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t {
static_assert_imm2!(LANE);
simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t {
static_assert_imm1!(LANE);
simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
static_assert_imm2!(LANE);
simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Floating-point multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
static_assert_imm1!(LANE);
simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Floating-point multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
static_assert_imm2!(LANE);
simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Floating-point multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
static_assert_imm1!(LANE);
simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Floating-point multiply
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
static_assert_imm2!(LANE);
simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Signed multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smull.v8i8")]
fn vmull_s8_(a: int8x8_t, b: int8x8_t) -> int16x8_t;
}
vmull_s8_(a, b)
}
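// Editor's sketch (not generated): a "long" multiply widens each product to
// twice the lane width, so it never wraps: 100 * 100 = 10000 fits in i16 even
// though it overflows i8. Compile-only; the helper name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn vmull_s8_sketch() {
let a: int8x8_t = transmute([100i8; 8]);
let r: [i16; 8] = transmute(vmull_s8(a, a)); // widened 16-bit products
assert_eq!(r, [10_000i16; 8]);
}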
/// Signed multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smull.v4i16")]
fn vmull_s16_(a: int16x4_t, b: int16x4_t) -> int32x4_t;
}
vmull_s16_(a, b)
}
/// Signed multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmulls.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smull.v2i32")]
fn vmull_s32_(a: int32x2_t, b: int32x2_t) -> int64x2_t;
}
vmull_s32_(a, b)
}
/// Unsigned multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umull.v8i8")]
fn vmull_u8_(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t;
}
vmull_u8_(a, b)
}
/// Unsigned multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umull.v4i16")]
fn vmull_u16_(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t;
}
vmull_u16_(a, b)
}
/// Unsigned multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umull.v2i32")]
fn vmull_u32_(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t;
}
vmull_u32_(a, b)
}
/// Polynomial multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.p8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmull))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmullp.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.pmull.v8i8")]
fn vmull_p8_(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t;
}
vmull_p8_(a, b)
}
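// Editor's sketch (not generated): the polynomial long multiply keeps the full
// carry-less product: 0x80 * 0x80 = x^7 * x^7 = x^14 = 0x4000 as a poly16.
// Compile-only; the helper name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn vmull_p8_sketch() {
let a: poly8x8_t = transmute([0x80u8; 8]);
let r: [u16; 8] = transmute(vmull_p8(a, a)); // 16-bit carry-less products
assert_eq!(r, [0x4000u16; 8]);
}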
/// Vector long multiply with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t {
vmull_s16(a, vdup_n_s16(b))
}
/// Vector long multiply with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t {
vmull_s32(a, vdup_n_s32(b))
}
/// Vector long multiply with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t {
vmull_u16(a, vdup_n_u16(b))
}
/// Vector long multiply with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t {
vmull_u32(a, vdup_n_u32(b))
}
/// Vector long multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int32x4_t {
static_assert_imm2!(LANE);
vmull_s16(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector long multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
static_assert_imm3!(LANE);
vmull_s16(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector long multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int64x2_t {
static_assert_imm1!(LANE);
vmull_s32(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector long multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
static_assert_imm2!(LANE);
vmull_s32(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector long multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t {
static_assert_imm2!(LANE);
vmull_u16(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector long multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x8_t) -> uint32x4_t {
static_assert_imm3!(LANE);
vmull_u16(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector long multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
static_assert_imm1!(LANE);
vmull_u32(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Vector long multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x4_t) -> uint64x2_t {
static_assert_imm2!(LANE);
vmull_u32(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
}
/// Floating-point fused multiply-add to accumulator (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v2f32")]
fn vfma_f32_(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
}
// llvm.fma computes (x * y) + z, so the accumulator `a` goes last
vfma_f32_(b, c, a)
}
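// Editor's sketch (not generated): vfma_f32 computes a + b * c with a single
// rounding (the accumulator is added, not multiplied). Compile-only; the
// helper name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn vfma_f32_sketch() {
let a: float32x2_t = vdup_n_f32(1.0); // accumulator
let b: float32x2_t = vdup_n_f32(2.0);
let c: float32x2_t = vdup_n_f32(3.0);
let r: [f32; 2] = transmute(vfma_f32(a, b, c)); // 1 + 2 * 3
assert_eq!(r, [7.0, 7.0]);
}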
/// Floating-point fused multiply-add to accumulator (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.fma.v4f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.v4f32")]
fn vfmaq_f32_(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
}
// llvm.fma computes (x * y) + z, so the accumulator `a` goes last
vfmaq_f32_(b, c, a)
}
/// Floating-point fused multiply-add to accumulator (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t {
vfma_f32(a, b, vdup_n_f32_vfp4(c))
}
/// Floating-point fused multiply-add to accumulator (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t {
vfmaq_f32(a, b, vdupq_n_f32_vfp4(c))
}
/// Floating-point fused multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
// negate b so that a + (-b) * c computes a - b * c
let b: float32x2_t = simd_neg(b);
vfma_f32(a, b, c)
}
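// Editor's sketch (not generated): vfms_f32 computes a - b * c by negating `b`
// and reusing the fused multiply-add, so only one rounding occurs.
// Compile-only; the helper name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn vfms_f32_sketch() {
let a: float32x2_t = vdup_n_f32(10.0);
let b: float32x2_t = vdup_n_f32(2.0);
let c: float32x2_t = vdup_n_f32(3.0);
let r: [f32; 2] = transmute(vfms_f32(a, b, c)); // 10 - 2 * 3
assert_eq!(r, [4.0, 4.0]);
}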
/// Floating-point fused multiply-subtract from accumulator
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
// negate b so that a + (-b) * c computes a - b * c
let b: float32x4_t = simd_neg(b);
vfmaq_f32(a, b, c)
}
/// Floating-point fused multiply-subtract from accumulator (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t {
vfms_f32(a, b, vdup_n_f32_vfp4(c))
}
/// Floating-point fused multiply-subtract from accumulator (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t {
vfmsq_f32(a, b, vdupq_n_f32_vfp4(c))
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
simd_sub(a, b)
}
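// Editor's sketch (not generated): like the multiplies, plain vector subtract
// wraps on overflow (the vqsub_* family saturates instead). Compile-only; the
// helper name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn vsub_u8_sketch() {
let a: uint8x8_t = transmute([0u8; 8]);
let b: uint8x8_t = transmute([1u8; 8]);
let r: [u8; 8] = transmute(vsub_u8(a, b)); // 0 - 1 wraps to 255
assert_eq!(r, [255u8; 8]);
}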
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
simd_sub(a, b)
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
simd_sub(a, b)
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
simd_sub(a, b)
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
simd_sub(a, b)
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
simd_sub(a, b)
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
simd_sub(a, b)
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
simd_sub(a, b)
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
simd_sub(a, b)
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
simd_sub(a, b)
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
simd_sub(a, b)
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
simd_sub(a, b)
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
simd_sub(a, b)
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
simd_sub(a, b)
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
simd_sub(a, b)
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
simd_sub(a, b)
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
simd_sub(a, b)
}
/// Subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
simd_sub(a, b)
}
/// Bitwise exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
simd_xor(a, b)
}
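// Editor's sketch (not generated): addition of polynomials over GF(2) adds
// coefficients modulo 2, which is exactly a bitwise XOR, as the simd_xor body
// above shows. Compile-only; the helper name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn vadd_p8_sketch() {
let a: poly8x8_t = transmute([0b0011u8; 8]); // x + 1
let b: poly8x8_t = transmute([0b0101u8; 8]); // x^2 + 1
let r: [u8; 8] = transmute(vadd_p8(a, b)); // (x + 1) + (x^2 + 1) = x^2 + x
assert_eq!(r, [0b0110u8; 8]);
}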
/// Bitwise exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
simd_xor(a, b)
}
/// Bitwise exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
simd_xor(a, b)
}
/// Bitwise exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
simd_xor(a, b)
}
/// Bitwise exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
simd_xor(a, b)
}
/// Bitwise exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
simd_xor(a, b)
}
/// Bitwise exclusive OR
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p128)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 {
a ^ b
}
/// Subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t {
let c: i16x8 = i16x8::new(8, 8, 8, 8, 8, 8, 8, 8);
simd_cast(simd_shr(simd_sub(a, b), transmute(c)))
}
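// Editor's sketch (not generated): "subtract returning high narrow" computes
// a - b, shifts each lane right by half the element width (8 bits here), and
// truncates to the narrower type. Compile-only; the helper name is
// hypothetical.
#[cfg(test)]
#[allow(dead_code)]
unsafe fn vsubhn_s16_sketch() {
let a: int16x8_t = vdupq_n_s16(0x1234);
let b: int16x8_t = vdupq_n_s16(0x0034);
let r: [i8; 8] = transmute(vsubhn_s16(a, b)); // (0x1234 - 0x0034) >> 8
assert_eq!(r, [0x12i8; 8]);
}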
/// Subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t {
let c: i32x4 = i32x4::new(16, 16, 16, 16);
simd_cast(simd_shr(simd_sub(a, b), transmute(c)))
}
/// Subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t {
let c: i64x2 = i64x2::new(32, 32);
simd_cast(simd_shr(simd_sub(a, b), transmute(c)))
}
/// Subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t {
let c: u16x8 = u16x8::new(8, 8, 8, 8, 8, 8, 8, 8);
simd_cast(simd_shr(simd_sub(a, b), transmute(c)))
}
/// Subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t {
let c: u32x4 = u32x4::new(16, 16, 16, 16);
simd_cast(simd_shr(simd_sub(a, b), transmute(c)))
}
/// Subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
let c: u64x2 = u64x2::new(32, 32);
simd_cast(simd_shr(simd_sub(a, b), transmute(c)))
}
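// Editorial sketch, not produced by stdarch-gen; names and values are
// illustrative assumptions. vsubhn keeps only the high half of each
// double-width difference, i.e. (a - b) >> 8 for the 16-bit variant,
// narrowed back to 8 bits.
#[cfg(test)]
mod vsubhn_example {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn high_half_of_difference() {
        let a: i16x8 = i16x8::new(0x1234, 0x0200, 0, 0, 0, 0, 0, 0);
        let b: i16x8 = i16x8::new(0x0034, 0x0100, 0, 0, 0, 0, 0, 0);
        // 0x1234 - 0x0034 = 0x1200 -> high byte 0x12;
        // 0x0200 - 0x0100 = 0x0100 -> high byte 0x01.
        let r: i8x8 = transmute(vsubhn_s16(transmute(a), transmute(b)));
        assert_eq!(r, i8x8::new(0x12, 0x01, 0, 0, 0, 0, 0, 0));
    }
}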
/// Subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
let d: int8x8_t = vsubhn_s16(b, c);
simd_shuffle16!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
let d: int16x4_t = vsubhn_s32(b, c);
simd_shuffle8!(a, d, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
let d: int32x2_t = vsubhn_s64(b, c);
simd_shuffle4!(a, d, [0, 1, 2, 3])
}
/// Subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
let d: uint8x8_t = vsubhn_u16(b, c);
simd_shuffle16!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
let d: uint16x4_t = vsubhn_u32(b, c);
simd_shuffle8!(a, d, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
let d: uint32x2_t = vsubhn_u64(b, c);
simd_shuffle4!(a, d, [0, 1, 2, 3])
}
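// Editorial sketch, not produced by stdarch-gen; names and values are
// illustrative assumptions. The _high variants narrow into the upper lanes
// of the result, keeping `a` as the lower half.
#[cfg(test)]
mod vsubhn_high_example {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn narrow_into_upper_lanes() {
        let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
        let b: i16x8 = i16x8::new(
            0x0100, 0x0200, 0x0300, 0x0400, 0x0500, 0x0600, 0x0700, 0x0800,
        );
        let c: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
        // The high bytes of b - c are 1..=8 and land in lanes 8..16.
        let r: i8x16 = transmute(vsubhn_high_s16(transmute(a), transmute(b), transmute(c)));
        assert_eq!(r, i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
    }
}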
/// Unsigned halving subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhsub.v8i8")]
fn vhsub_u8_(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t;
}
vhsub_u8_(a, b)
}
/// Unsigned halving subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhsub.v16i8")]
fn vhsubq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
}
vhsubq_u8_(a, b)
}
/// Unsigned halving subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhsub.v4i16")]
fn vhsub_u16_(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t;
}
vhsub_u16_(a, b)
}
/// Unsigned halving subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhsub.v8i16")]
fn vhsubq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
}
vhsubq_u16_(a, b)
}
/// Unsigned halving subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhsub.v2i32")]
fn vhsub_u32_(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t;
}
vhsub_u32_(a, b)
}
/// Unsigned halving subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubu.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uhsub.v4i32")]
fn vhsubq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
}
vhsubq_u32_(a, b)
}
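// Editorial sketch, not produced by stdarch-gen; names and values are
// illustrative assumptions. The halving subtract computes the difference at
// full precision and then shifts it right by one, so the intermediate value
// never wraps.
#[cfg(test)]
mod vhsub_unsigned_example {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn halved_difference() {
        let a: u8x8 = u8x8::new(10, 7, 200, 1, 0, 0, 0, 0);
        let b: u8x8 = u8x8::new(4, 2, 100, 1, 0, 0, 0, 0);
        // (10 - 4) >> 1 = 3, (7 - 2) >> 1 = 2, (200 - 100) >> 1 = 50.
        let r: u8x8 = transmute(vhsub_u8(transmute(a), transmute(b)));
        assert_eq!(r, u8x8::new(3, 2, 50, 0, 0, 0, 0, 0));
    }
}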
/// Signed halving subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shsub.v8i8")]
fn vhsub_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t;
}
vhsub_s8_(a, b)
}
/// Signed halving subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shsub.v16i8")]
fn vhsubq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t;
}
vhsubq_s8_(a, b)
}
/// Signed halving subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shsub.v4i16")]
fn vhsub_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t;
}
vhsub_s16_(a, b)
}
/// Signed halving subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shsub.v8i16")]
fn vhsubq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
}
vhsubq_s16_(a, b)
}
/// Signed halving subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shsub.v2i32")]
fn vhsub_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t;
}
vhsub_s32_(a, b)
}
/// Signed halving subtract
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vhsubs.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.shsub.v4i32")]
fn vhsubq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
}
vhsubq_s32_(a, b)
}
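// Editorial sketch, not produced by stdarch-gen; names and values are
// illustrative assumptions. The signed variant halves with an arithmetic
// shift of the double-width difference, so even extreme inputs stay exact.
#[cfg(test)]
mod vhsub_signed_example {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn arithmetic_halving() {
        let a: i8x8 = i8x8::new(1, -8, 127, 0, 0, 0, 0, 0);
        let b: i8x8 = i8x8::new(4, 8, -128, 0, 0, 0, 0, 0);
        // (1 - 4) >> 1 = -2, (-8 - 8) >> 1 = -8, (127 - -128) >> 1 = 127.
        let r: i8x8 = transmute(vhsub_s8(transmute(a), transmute(b)));
        assert_eq!(r, i8x8::new(-2, -8, 127, 0, 0, 0, 0, 0));
    }
}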
/// Signed Subtract Wide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubw))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t {
simd_sub(a, simd_cast(b))
}
/// Signed Subtract Wide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubw))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t {
simd_sub(a, simd_cast(b))
}
/// Signed Subtract Wide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubw))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t {
simd_sub(a, simd_cast(b))
}
/// Unsigned Subtract Wide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubw))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
simd_sub(a, simd_cast(b))
}
/// Unsigned Subtract Wide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubw))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t {
simd_sub(a, simd_cast(b))
}
/// Unsigned Subtract Wide
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubw))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t {
simd_sub(a, simd_cast(b))
}
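// Editorial sketch, not produced by stdarch-gen; names and values are
// illustrative assumptions. The wide subtract extends only `b` to the wide
// element type before subtracting it from the already-wide `a`.
#[cfg(test)]
mod vsubw_example {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn widened_operand() {
        let a: i16x8 = i16x8::new(300, 0, 0, 0, 0, 0, 0, 0);
        let b: i8x8 = i8x8::new(-100, 0, 0, 0, 0, 0, 0, 0);
        // 300 - (-100) = 400, representable only in the wide element type.
        let r: i16x8 = transmute(vsubw_s8(transmute(a), transmute(b)));
        assert_eq!(r, i16x8::new(400, 0, 0, 0, 0, 0, 0, 0));
    }
}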
/// Signed Subtract Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t {
let c: int16x8_t = simd_cast(a);
let d: int16x8_t = simd_cast(b);
simd_sub(c, d)
}
/// Signed Subtract Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
let c: int32x4_t = simd_cast(a);
let d: int32x4_t = simd_cast(b);
simd_sub(c, d)
}
/// Signed Subtract Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t {
let c: int64x2_t = simd_cast(a);
let d: int64x2_t = simd_cast(b);
simd_sub(c, d)
}
/// Unsigned Subtract Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t {
let c: uint16x8_t = simd_cast(a);
let d: uint16x8_t = simd_cast(b);
simd_sub(c, d)
}
/// Unsigned Subtract Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t {
let c: uint32x4_t = simd_cast(a);
let d: uint32x4_t = simd_cast(b);
simd_sub(c, d)
}
/// Unsigned Subtract Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
let c: uint64x2_t = simd_cast(a);
let d: uint64x2_t = simd_cast(b);
simd_sub(c, d)
}
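// Editorial sketch, not produced by stdarch-gen; names and values are
// illustrative assumptions. The long subtract extends both operands before
// subtracting, so the result cannot overflow the narrow element type.
#[cfg(test)]
mod vsubl_example {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn both_operands_widened() {
        let a: i8x8 = i8x8::new(-128, 0, 0, 0, 0, 0, 0, 0);
        let b: i8x8 = i8x8::new(127, 0, 0, 0, 0, 0, 0, 0);
        // Both inputs are sign-extended to 16 bits first: -128 - 127 = -255.
        let r: i16x8 = transmute(vsubl_s8(transmute(a), transmute(b)));
        assert_eq!(r, i16x8::new(-255, 0, 0, 0, 0, 0, 0, 0));
    }
}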
/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smax.v8i8")]
fn vmax_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t;
}
vmax_s8_(a, b)
}
/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smax.v16i8")]
fn vmaxq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t;
}
vmaxq_s8_(a, b)
}
/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smax.v4i16")]
fn vmax_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t;
}
vmax_s16_(a, b)
}
/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smax.v8i16")]
fn vmaxq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
}
vmaxq_s16_(a, b)
}
/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smax.v2i32")]
fn vmax_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t;
}
vmax_s32_(a, b)
}
/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smax.v4i32")]
fn vmaxq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
}
vmaxq_s32_(a, b)
}
/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umax.v8i8")]
fn vmax_u8_(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t;
}
vmax_u8_(a, b)
}
/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umax.v16i8")]
fn vmaxq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
}
vmaxq_u8_(a, b)
}
/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umax.v4i16")]
fn vmax_u16_(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t;
}
vmax_u16_(a, b)
}
/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umax.v8i16")]
fn vmaxq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
}
vmaxq_u16_(a, b)
}
/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umax.v2i32")]
fn vmax_u32_(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t;
}
vmax_u32_(a, b)
}
/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxu.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umax.v4i32")]
fn vmaxq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
}
vmaxq_u32_(a, b)
}
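// Editorial sketch, not produced by stdarch-gen; names and values are
// illustrative assumptions. vmax picks the larger element lane-wise, using
// signed comparison for the s-typed variants.
#[cfg(test)]
mod vmax_example {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn lanewise_signed_max() {
        let a: i8x8 = i8x8::new(-1, 5, 0, 7, -3, 2, 8, -9);
        let b: i8x8 = i8x8::new(0, 3, 1, 7, -4, 6, -8, 9);
        let r: i8x8 = transmute(vmax_s8(transmute(a), transmute(b)));
        assert_eq!(r, i8x8::new(0, 5, 1, 7, -3, 6, 8, 9));
    }
}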
/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmax))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmax.v2f32")]
fn vmax_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
}
vmax_f32_(a, b)
}
/// Maximum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmax))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxs.v4f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmax.v4f32")]
fn vmaxq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
}
vmaxq_f32_(a, b)
}
/// Floating-point Maximum Number (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmaxnm))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnm.v2f32")]
fn vmaxnm_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
}
vmaxnm_f32_(a, b)
}
/// Floating-point Maximum Number (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmaxnm))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmaxnm.v4f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmaxnm.v4f32")]
fn vmaxnmq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
}
vmaxnmq_f32_(a, b)
}
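// Editorial sketch, not produced by stdarch-gen; names, values, and the
// AArch64-only gating (chosen here to sidestep the fp-armv8 requirement on
// 32-bit Arm) are assumptions. Unlike vmax, vmaxnm follows IEEE 754 maxNum
// semantics: a quiet NaN lane yields the numeric operand instead of NaN.
#[cfg(all(test, target_arch = "aarch64"))]
mod vmaxnm_example {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn nan_lane_yields_number() {
        let a: f32x2 = f32x2::new(f32::NAN, 1.0);
        let b: f32x2 = f32x2::new(2.0, 3.0);
        // Lane 0 ignores the NaN and returns 2.0; lane 1 is max(1.0, 3.0).
        let r: f32x2 = transmute(vmaxnm_f32(transmute(a), transmute(b)));
        assert_eq!(r, f32x2::new(2.0, 3.0));
    }
}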
/// Minimum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smin.v8i8")]
fn vmin_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t;
}
vmin_s8_(a, b)
}
/// Minimum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smin.v16i8")]
fn vminq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t;
}
vminq_s8_(a, b)
}
/// Minimum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smin.v4i16")]
fn vmin_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t;
}
vmin_s16_(a, b)
}
/// Minimum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smin.v8i16")]
fn vminq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
}
vminq_s16_(a, b)
}
/// Minimum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smin.v2i32")]
fn vmin_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t;
}
vmin_s32_(a, b)
}
/// Minimum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.smin.v4i32")]
fn vminq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
}
vminq_s32_(a, b)
}
/// Minimum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umin.v8i8")]
fn vmin_u8_(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t;
}
vmin_u8_(a, b)
}
/// Minimum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umin.v16i8")]
fn vminq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
}
vminq_u8_(a, b)
}
/// Minimum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umin.v4i16")]
fn vmin_u16_(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t;
}
vmin_u16_(a, b)
}
/// Minimum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umin.v8i16")]
fn vminq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
}
vminq_u16_(a, b)
}
/// Minimum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umin.v2i32")]
fn vmin_u32_(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t;
}
vmin_u32_(a, b)
}
/// Minimum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminu.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.umin.v4i32")]
fn vminq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
}
vminq_u32_(a, b)
}
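// Editorial sketch, not produced by stdarch-gen; names and values are
// illustrative assumptions. The u-typed minimum compares without sign, so
// large lanes such as 200 are not mistaken for negative values.
#[cfg(test)]
mod vmin_example {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn lanewise_unsigned_min() {
        let a: u8x8 = u8x8::new(1, 200, 0, 255, 16, 7, 42, 9);
        let b: u8x8 = u8x8::new(2, 100, 3, 254, 15, 8, 42, 1);
        let r: u8x8 = transmute(vmin_u8(transmute(a), transmute(b)));
        assert_eq!(r, u8x8::new(1, 100, 0, 254, 15, 7, 42, 1));
    }
}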
/// Minimum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmin))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmin.v2f32")]
fn vmin_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
}
vmin_f32_(a, b)
}
/// Minimum (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmin))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vmins.v4f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fmin.v4f32")]
fn vminq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
}
vminq_f32_(a, b)
}
/// Floating-point Minimum Number (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fminnm))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnm.v2f32")]
fn vminnm_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
}
vminnm_f32_(a, b)
}
/// Floating-point Minimum Number (vector)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fminnm))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vminnm.v4f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.fminnm.v4f32")]
fn vminnmq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
}
vminnmq_f32_(a, b)
}
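// Editorial sketch, not produced by stdarch-gen; names, values, and the
// AArch64-only gating (to sidestep fp-armv8 on 32-bit Arm) are assumptions.
// Mirroring vmaxnm, a quiet NaN lane in vminnm yields the numeric operand.
#[cfg(all(test, target_arch = "aarch64"))]
mod vminnm_example {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn nan_lane_yields_number() {
        let a: f32x2 = f32x2::new(f32::NAN, -1.0);
        let b: f32x2 = f32x2::new(2.0, 3.0);
        // Lane 0 ignores the NaN and returns 2.0; lane 1 is min(-1.0, 3.0).
        let r: f32x2 = transmute(vminnm_f32(transmute(a), transmute(b)));
        assert_eq!(r, f32x2::new(2.0, -1.0));
    }
}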
/// Floating-point add pairwise
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(faddp))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vpadd.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.faddp.v2f32")]
fn vpadd_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
}
vpadd_f32_(a, b)
}
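// Editorial sketch, not produced by stdarch-gen; names and values are
// illustrative assumptions. The pairwise add sums adjacent lanes within
// each operand: lane 0 of the result is a0 + a1, lane 1 is b0 + b1.
#[cfg(test)]
mod vpadd_example {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn adjacent_lane_sums() {
        let a: f32x2 = f32x2::new(1.0, 2.0);
        let b: f32x2 = f32x2::new(10.0, 20.0);
        let r: f32x2 = transmute(vpadd_f32(transmute(a), transmute(b)));
        assert_eq!(r, f32x2::new(3.0, 30.0));
    }
}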
/// Signed saturating doubling multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqdmull.v4i32")]
fn vqdmull_s16_(a: int16x4_t, b: int16x4_t) -> int32x4_t;
}
vqdmull_s16_(a, b)
}
/// Signed saturating doubling multiply long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmull.v2i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqdmull.v2i64")]
fn vqdmull_s32_(a: int32x2_t, b: int32x2_t) -> int64x2_t;
}
vqdmull_s32_(a, b)
}
/// Vector saturating doubling long multiply with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t {
vqdmull_s16(a, vdup_n_s16(b))
}
/// Vector saturating doubling long multiply with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t {
vqdmull_s32(a, vdup_n_s32(b))
}
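// Editorial sketch, not produced by stdarch-gen; names and values are
// illustrative assumptions. vqdmull computes 2 * a * b into the wide element
// type, saturating the one case that still overflows; the _n variants only
// broadcast the scalar operand first.
#[cfg(test)]
mod vqdmull_example {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn doubling_multiply_saturates() {
        let a: i16x4 = i16x4::new(100, i16::MIN, 0, 1);
        let b: i16x4 = i16x4::new(200, i16::MIN, 5, -1);
        // 2 * 100 * 200 = 40_000; 2 * (-32768)^2 = 2^31 saturates to i32::MAX.
        let r: i32x4 = transmute(vqdmull_s16(transmute(a), transmute(b)));
        assert_eq!(r, i32x4::new(40_000, i32::MAX, 0, -2));
    }
}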
/// Vector saturating doubling long multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmull_lane_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int32x4_t {
static_assert_imm2!(N);
let b: int16x4_t = simd_shuffle4!(b, b, <const N: i32> [N as u32, N as u32, N as u32, N as u32]);
vqdmull_s16(a, b)
}
/// Vector saturating doubling long multiply by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull, N = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmull_lane_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int64x2_t {
static_assert_imm1!(N);
let b: int32x2_t = simd_shuffle2!(b, b, <const N: i32> [N as u32, N as u32]);
vqdmull_s32(a, b)
}
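// Editorial sketch, not produced by stdarch-gen; names and values are
// illustrative assumptions. The _lane variants broadcast one const-selected
// lane of `b` before the doubling multiply.
#[cfg(test)]
mod vqdmull_lane_example {
    use super::*;
    use crate::core_arch::simd::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn broadcast_selected_lane() {
        let a: i16x4 = i16x4::new(1, 2, 3, 4);
        let b: i16x4 = i16x4::new(10, 20, 30, 40);
        // Lane 2 of b (30) is broadcast: each result lane is 2 * a[i] * 30.
        let r: i32x4 = transmute(vqdmull_lane_s16::<2>(transmute(a), transmute(b)));
        assert_eq!(r, i32x4::new(60, 120, 180, 240));
    }
}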
/// Signed saturating doubling multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
vqaddq_s32(a, vqdmull_s16(b, c))
}
/// Signed saturating doubling multiply-add long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
vqaddq_s64(a, vqdmull_s32(b, c))
}
/// Vector widening saturating doubling multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t {
vqaddq_s32(a, vqdmull_n_s16(b, c))
}
/// Vector widening saturating doubling multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t {
vqaddq_s64(a, vqdmull_n_s32(b, c))
}
/// Vector widening saturating doubling multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal, N = 2))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmlal_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
static_assert_imm2!(N);
vqaddq_s32(a, vqdmull_lane_s16::<N>(b, c))
}
/// Vector widening saturating doubling multiply accumulate with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal, N = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmlal_lane_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
static_assert_imm1!(N);
vqaddq_s64(a, vqdmull_lane_s32::<N>(b, c))
}
/// Signed saturating doubling multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
vqsubq_s32(a, vqdmull_s16(b, c))
}
/// Signed saturating doubling multiply-subtract long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
vqsubq_s64(a, vqdmull_s32(b, c))
}
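// Editorial sketch (not generated output): `vqdmlsl_s16` is the subtracting
// counterpart of `vqdmlal_s16`, computing a saturating `a - 2 * b * c` per
// widened lane; same AArch64/NEON assumption as the sketch above.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqdmlsl_example {
    use super::*;
    #[test]
    fn widening_multiply_subtract() {
        unsafe {
            let acc = vdupq_n_s32(100);
            let b = vdup_n_s16(3);
            let c = vdup_n_s16(4);
            // Each lane: 100 - 2*3*4 = 76.
            let r = vqdmlsl_s16(acc, b, c);
            assert_eq!(vgetq_lane_s32::<0>(r), 76);
        }
    }
}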
/// Vector widening saturating doubling multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t {
vqsubq_s32(a, vqdmull_n_s16(b, c))
}
/// Vector widening saturating doubling multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t {
vqsubq_s64(a, vqdmull_n_s32(b, c))
}
/// Vector widening saturating doubling multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl, N = 2))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmlsl_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
static_assert_imm2!(N);
vqsubq_s32(a, vqdmull_lane_s16::<N>(b, c))
}
/// Vector widening saturating doubling multiply subtract with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl, N = 1))]
#[rustc_legacy_const_generics(3)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmlsl_lane_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
static_assert_imm1!(N);
vqsubq_s64(a, vqdmull_lane_s32::<N>(b, c))
}
/// Signed saturating doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqdmulh.v4i16")]
fn vqdmulh_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t;
}
vqdmulh_s16_(a, b)
}
/// Signed saturating doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqdmulh.v8i16")]
fn vqdmulhq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
}
vqdmulhq_s16_(a, b)
}
/// Signed saturating doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqdmulh.v2i32")]
fn vqdmulh_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t;
}
vqdmulh_s32_(a, b)
}
/// Signed saturating doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqdmulh.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqdmulh.v4i32")]
fn vqdmulhq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
}
vqdmulhq_s32_(a, b)
}
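// Editorial sketch (not generated output): `vqdmulh_s16` returns the high
// half of the saturated doubled product, i.e. `(2 * a * b) >> 16` per lane,
// which is the classic Q15 fixed-point multiply.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqdmulh_example {
    use super::*;
    #[test]
    fn q15_multiply_high() {
        unsafe {
            // 0.5 * 0.5 = 0.25 in Q15: 16384 -> 8192.
            let r = vqdmulh_s16(vdup_n_s16(16384), vdup_n_s16(16384));
            assert_eq!(vget_lane_s16::<0>(r), 8192);
            // Saturation: -1.0 * -1.0 would be +1.0, which clamps to 0x7FFF.
            let s = vqdmulh_s16(vdup_n_s16(i16::MIN), vdup_n_s16(i16::MIN));
            assert_eq!(vget_lane_s16::<0>(s), i16::MAX);
        }
    }
}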
/// Vector saturating doubling multiply high with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t {
let b: int16x4_t = vdup_n_s16(b);
vqdmulh_s16(a, b)
}
/// Vector saturating doubling multiply high with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t {
let b: int32x2_t = vdup_n_s32(b);
vqdmulh_s32(a, b)
}
/// Vector saturating doubling multiply high with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t {
let b: int16x8_t = vdupq_n_s16(b);
vqdmulhq_s16(a, b)
}
/// Vector saturating doubling multiply high with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t {
let b: int32x4_t = vdupq_n_s32(b);
vqdmulhq_s32(a, b)
}
/// Vector saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
static_assert_imm3!(LANE);
vqdmulhq_s16(a, vdupq_n_s16(simd_extract(b, LANE as u32)))
}
/// Vector saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t {
static_assert_imm3!(LANE);
vqdmulh_s16(a, vdup_n_s16(simd_extract(b, LANE as u32)))
}
/// Vector saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
static_assert_imm2!(LANE);
vqdmulhq_s32(a, vdupq_n_s32(simd_extract(b, LANE as u32)))
}
/// Vector saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t {
static_assert_imm2!(LANE);
vqdmulh_s32(a, vdup_n_s32(simd_extract(b, LANE as u32)))
}
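// Editorial sketch (not generated output): the `laneq` forms broadcast one
// lane of the quad second operand before multiplying. `vcombine_s16` is used
// here only to build a vector whose two halves differ, so the lane choice is
// observable.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqdmulh_laneq_example {
    use super::*;
    #[test]
    fn multiply_high_by_lane() {
        unsafe {
            let a = vdup_n_s16(16384);
            // Lanes 0..=3 hold 8192, lanes 4..=7 hold 16384.
            let b = vcombine_s16(vdup_n_s16(8192), vdup_n_s16(16384));
            // LANE = 4 selects 16384: (2 * 16384 * 16384) >> 16 = 8192.
            let r = vqdmulh_laneq_s16::<4>(a, b);
            assert_eq!(vget_lane_s16::<0>(r), 8192);
        }
    }
}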
/// Signed saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqxtn.v8i8")]
fn vqmovn_s16_(a: int16x8_t) -> int8x8_t;
}
vqmovn_s16_(a)
}
/// Signed saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqxtn.v4i16")]
fn vqmovn_s32_(a: int32x4_t) -> int16x4_t;
}
vqmovn_s32_(a)
}
/// Signed saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovns.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqxtn.v2i32")]
fn vqmovn_s64_(a: int64x2_t) -> int32x2_t;
}
vqmovn_s64_(a)
}
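// Editorial sketch (not generated output): `vqmovn_*` halves the element
// width and clamps out-of-range values instead of truncating them.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqmovn_example {
    use super::*;
    #[test]
    fn saturating_narrow() {
        unsafe {
            // 1000 does not fit in i8, so every lane clamps to 127.
            let r = vqmovn_s16(vdupq_n_s16(1000));
            assert_eq!(vget_lane_s8::<0>(r), 127);
        }
    }
}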
/// Unsigned saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqxtn.v8i8")]
fn vqmovn_u16_(a: uint16x8_t) -> uint8x8_t;
}
vqmovn_u16_(a)
}
/// Unsigned saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqxtn.v4i16")]
fn vqmovn_u32_(a: uint32x4_t) -> uint16x4_t;
}
vqmovn_u32_(a)
}
/// Unsigned saturating extract narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqxtn.v2i32")]
fn vqmovn_u64_(a: uint64x2_t) -> uint32x2_t;
}
vqmovn_u64_(a)
}
/// Signed saturating extract unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqxtun.v8i8")]
fn vqmovun_s16_(a: int16x8_t) -> uint8x8_t;
}
vqmovun_s16_(a)
}
/// Signed saturating extract unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqxtun.v4i16")]
fn vqmovun_s32_(a: int32x4_t) -> uint16x4_t;
}
vqmovun_s32_(a)
}
/// Signed saturating extract unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqmovnsu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqxtun.v2i32")]
fn vqmovun_s64_(a: int64x2_t) -> uint32x2_t;
}
vqmovun_s64_(a)
}
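// Editorial sketch (not generated output): the `vqmovun_*` variants narrow a
// signed source into an unsigned result, so negative inputs clamp to zero and
// large positives clamp to the unsigned maximum.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqmovun_example {
    use super::*;
    #[test]
    fn signed_to_unsigned_narrow() {
        unsafe {
            assert_eq!(vget_lane_u8::<0>(vqmovun_s16(vdupq_n_s16(-7))), 0);
            assert_eq!(vget_lane_u8::<0>(vqmovun_s16(vdupq_n_s16(300))), 255);
        }
    }
}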
/// Signed saturating rounding doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrdmulh.v4i16")]
fn vqrdmulh_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t;
}
vqrdmulh_s16_(a, b)
}
/// Signed saturating rounding doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrdmulh.v8i16")]
fn vqrdmulhq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
}
vqrdmulhq_s16_(a, b)
}
/// Signed saturating rounding doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrdmulh.v2i32")]
fn vqrdmulh_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t;
}
vqrdmulh_s32_(a, b)
}
/// Signed saturating rounding doubling multiply returning high half
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrdmulh.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrdmulh.v4i32")]
fn vqrdmulhq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
}
vqrdmulhq_s32_(a, b)
}
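// Editorial sketch (not generated output): `vqrdmulh_*` differs from
// `vqdmulh_*` only by adding the rounding constant `1 << 15` (for 16-bit
// lanes) before taking the high half, i.e. `(2*a*b + 0x8000) >> 16`.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqrdmulh_example {
    use super::*;
    #[test]
    fn rounding_vs_truncating_high_half() {
        unsafe {
            let a = vdup_n_s16(1);
            let b = vdup_n_s16(16384);
            // Truncating: (2*1*16384) >> 16 = 0.
            assert_eq!(vget_lane_s16::<0>(vqdmulh_s16(a, b)), 0);
            // Rounding: (2*1*16384 + 0x8000) >> 16 = 1.
            assert_eq!(vget_lane_s16::<0>(vqrdmulh_s16(a, b)), 1);
        }
    }
}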
/// Vector saturating rounding doubling multiply high with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t {
vqrdmulh_s16(a, vdup_n_s16(b))
}
/// Vector saturating rounding doubling multiply high with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t {
vqrdmulhq_s16(a, vdupq_n_s16(b))
}
/// Vector saturating rounding doubling multiply high with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t {
vqrdmulh_s32(a, vdup_n_s32(b))
}
/// Vector saturating rounding doubling multiply high with scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t {
vqrdmulhq_s32(a, vdupq_n_s32(b))
}
/// Vector rounding saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
static_assert_imm2!(LANE);
let b: int16x4_t = simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmulh_s16(a, b)
}
/// Vector rounding saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t {
static_assert_imm3!(LANE);
let b: int16x4_t = simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmulh_s16(a, b)
}
/// Vector rounding saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
static_assert_imm2!(LANE);
let b: int16x8_t = simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmulhq_s16(a, b)
}
/// Vector rounding saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
static_assert_imm3!(LANE);
let b: int16x8_t = simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmulhq_s16(a, b)
}
/// Vector rounding saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
static_assert_imm1!(LANE);
let b: int32x2_t = simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]);
vqrdmulh_s32(a, b)
}
/// Vector rounding saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t {
static_assert_imm2!(LANE);
let b: int32x2_t = simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]);
vqrdmulh_s32(a, b)
}
/// Vector rounding saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
static_assert_imm1!(LANE);
let b: int32x4_t = simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmulhq_s32(a, b)
}
/// Vector rounding saturating doubling multiply high by scalar
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
static_assert_imm2!(LANE);
let b: int32x4_t = simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmulhq_s32(a, b)
}
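// Editorial sketch (not generated output): the lane forms above are thin
// wrappers that splat `b[LANE]` via `simd_shuffle*!` and reuse the plain
// vector intrinsic. `vset_lane_s32` is used here only to give the two lanes
// distinct values.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqrdmulh_lane_example {
    use super::*;
    #[test]
    fn rounding_multiply_high_by_lane() {
        unsafe {
            let a = vdup_n_s32(1 << 30);
            // Lane 0 holds 2, lane 1 holds 1 << 30.
            let b = vset_lane_s32::<1>(1 << 30, vdup_n_s32(2));
            // LANE = 1: (2 * 2^30 * 2^30 + 2^31) >> 32 = 2^29.
            let r = vqrdmulh_lane_s32::<1>(a, b);
            assert_eq!(vget_lane_s32::<0>(r), 1 << 29);
        }
    }
}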
/// Signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.v8i8")]
fn vqrshl_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t;
}
vqrshl_s8_(a, b)
}
/// Signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.v16i8")]
fn vqrshlq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t;
}
vqrshlq_s8_(a, b)
}
/// Signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.v4i16")]
fn vqrshl_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t;
}
vqrshl_s16_(a, b)
}
/// Signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.v8i16")]
fn vqrshlq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
}
vqrshlq_s16_(a, b)
}
/// Signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.v2i32")]
fn vqrshl_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t;
}
vqrshl_s32_(a, b)
}
/// Signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.v4i32")]
fn vqrshlq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
}
vqrshlq_s32_(a, b)
}
/// Signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v1i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.v1i64")]
fn vqrshl_s64_(a: int64x1_t, b: int64x1_t) -> int64x1_t;
}
vqrshl_s64_(a, b)
}
/// Signed saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshifts.v2i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshl.v2i64")]
fn vqrshlq_s64_(a: int64x2_t, b: int64x2_t) -> int64x2_t;
}
vqrshlq_s64_(a, b)
}
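// Editorial sketch (not generated output): `vqrshl_*` shifts left for
// positive `b`, performs a *rounding* right shift for negative `b`, and
// saturates on overflow in either direction.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqrshl_example {
    use super::*;
    #[test]
    fn shift_both_directions() {
        unsafe {
            let a = vdup_n_s8(5);
            // b = -1: rounding right shift, (5 + 1) >> 1 = 3.
            assert_eq!(vget_lane_s8::<0>(vqrshl_s8(a, vdup_n_s8(-1))), 3);
            // b = 6: 5 << 6 = 320 overflows i8 and saturates to 127.
            assert_eq!(vget_lane_s8::<0>(vqrshl_s8(a, vdup_n_s8(6))), 127);
        }
    }
}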
/// Unsigned saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.v8i8")]
fn vqrshl_u8_(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
}
vqrshl_u8_(a, b)
}
/// Unsigned saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.v16i8")]
fn vqrshlq_u8_(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
}
vqrshlq_u8_(a, b)
}
/// Unsigned saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.v4i16")]
fn vqrshl_u16_(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
}
vqrshl_u16_(a, b)
}
/// Unsigned saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.v8i16")]
fn vqrshlq_u16_(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
}
vqrshlq_u16_(a, b)
}
/// Unsigned saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.v2i32")]
fn vqrshl_u32_(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
}
vqrshl_u32_(a, b)
}
/// Unsigned saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.v4i32")]
fn vqrshlq_u32_(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
}
vqrshlq_u32_(a, b)
}
/// Unsigned saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v1i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.v1i64")]
fn vqrshl_u64_(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
}
vqrshl_u64_(a, b)
}
/// Unsigned saturating rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftu.v2i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshl.v2i64")]
fn vqrshlq_u64_(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
}
vqrshlq_u64_(a, b)
}
/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")]
fn vqrshrn_n_s16_(a: int16x8_t, n: int16x8_t) -> int8x8_t;
}
vqrshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16))
}
/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrn.v8i8")]
fn vqrshrn_n_s16_(a: int16x8_t, n: i32) -> int8x8_t;
}
vqrshrn_n_s16_(a, N)
}
/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")]
fn vqrshrn_n_s32_(a: int32x4_t, n: int32x4_t) -> int16x4_t;
}
vqrshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
}
/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrn.v4i16")]
fn vqrshrn_n_s32_(a: int32x4_t, n: i32) -> int16x4_t;
}
vqrshrn_n_s32_(a, N)
}
/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")]
fn vqrshrn_n_s64_(a: int64x2_t, n: int64x2_t) -> int32x2_t;
}
vqrshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64))
}
/// Signed saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrn.v2i32")]
fn vqrshrn_n_s64_(a: int64x2_t, n: i32) -> int32x2_t;
}
vqrshrn_n_s64_(a, N)
}
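// Editorial sketch (not generated output): `vqrshrn_n_*` combines a rounding
// right shift by the immediate `N` with a saturating narrow; for
// `vqrshrn_n_s16::<2>` each lane becomes `sat_i8((a + 2) >> 2)`.
#[cfg(all(test, target_arch = "aarch64"))]
mod vqrshrn_example {
    use super::*;
    #[test]
    fn rounded_shift_right_narrow() {
        unsafe {
            // (7 + 2) >> 2 = 2 after rounding.
            assert_eq!(vget_lane_s8::<0>(vqrshrn_n_s16::<2>(vdupq_n_s16(7))), 2);
            // (1000 + 2) >> 2 = 250, which then saturates to 127 in i8.
            assert_eq!(vget_lane_s8::<0>(vqrshrn_n_s16::<2>(vdupq_n_s16(1000))), 127);
        }
    }
}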
/// Unsigned saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")]
fn vqrshrn_n_u16_(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t;
}
vqrshrn_n_u16_(a, uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16))
}
/// Unsigned saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshrn.v8i8")]
fn vqrshrn_n_u16_(a: uint16x8_t, n: i32) -> uint8x8_t;
}
vqrshrn_n_u16_(a, N)
}
/// Unsigned saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")]
fn vqrshrn_n_u32_(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t;
}
vqrshrn_n_u32_(a, uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32))
}
/// Unsigned saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshrn.v4i16")]
fn vqrshrn_n_u32_(a: uint32x4_t, n: i32) -> uint16x4_t;
}
vqrshrn_n_u32_(a, N)
}
/// Unsigned saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")]
fn vqrshrn_n_u64_(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t;
}
vqrshrn_n_u64_(a, uint64x2_t(-N as u64, -N as u64))
}
/// Unsigned saturating rounded shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshrn.v2i32")]
fn vqrshrn_n_u64_(a: uint64x2_t, n: i32) -> uint32x2_t;
}
vqrshrn_n_u64_(a, N)
}
/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")]
fn vqrshrun_n_s16_(a: int16x8_t, n: int16x8_t) -> uint8x8_t;
}
vqrshrun_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16))
}
/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrun.v8i8")]
fn vqrshrun_n_s16_(a: int16x8_t, n: i32) -> uint8x8_t;
}
vqrshrun_n_s16_(a, N)
}
/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")]
fn vqrshrun_n_s32_(a: int32x4_t, n: int32x4_t) -> uint16x4_t;
}
vqrshrun_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
}
/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrun.v4i16")]
fn vqrshrun_n_s32_(a: int32x4_t, n: i32) -> uint16x4_t;
}
vqrshrun_n_s32_(a, N)
}
/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")]
fn vqrshrun_n_s64_(a: int64x2_t, n: int64x2_t) -> uint32x2_t;
}
vqrshrun_n_s64_(a, int64x2_t(-N as i64, -N as i64))
}
/// Signed saturating rounded shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrun.v2i32")]
fn vqrshrun_n_s64_(a: int64x2_t, n: i32) -> uint32x2_t;
}
vqrshrun_n_s64_(a, N)
}
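// Editor's sketch (not part of the generated file): vqrshrun_n_s16 narrows a
// signed input to an unsigned result, so negative lanes clamp to zero. The
// inputs below are assumptions chosen for illustration.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
unsafe fn vqrshrun_n_s16_sketch() {
// (100 + 2) >> 2 = 25 after rounding.
assert_eq!(vget_lane_u8::<0>(vqrshrun_n_s16::<2>(vdupq_n_s16(100))), 25);
// A negative input has no unsigned representation: saturate to 0.
assert_eq!(vget_lane_u8::<0>(vqrshrun_n_s16::<2>(vdupq_n_s16(-100))), 0);
}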
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.v8i8")]
fn vqshl_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t;
}
vqshl_s8_(a, b)
}
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.v16i8")]
fn vqshlq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t;
}
vqshlq_s8_(a, b)
}
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.v4i16")]
fn vqshl_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t;
}
vqshl_s16_(a, b)
}
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.v8i16")]
fn vqshlq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
}
vqshlq_s16_(a, b)
}
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.v2i32")]
fn vqshl_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t;
}
vqshl_s32_(a, b)
}
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.v4i32")]
fn vqshlq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
}
vqshlq_s32_(a, b)
}
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v1i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.v1i64")]
fn vqshl_s64_(a: int64x1_t, b: int64x1_t) -> int64x1_t;
}
vqshl_s64_(a, b)
}
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshifts.v2i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshl.v2i64")]
fn vqshlq_s64_(a: int64x2_t, b: int64x2_t) -> int64x2_t;
}
vqshlq_s64_(a, b)
}
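// Editor's sketch (not part of the generated file): the register form of the
// signed saturating shift saturates instead of wrapping, and a negative
// per-lane count in `b` shifts right. The inputs are assumptions.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
unsafe fn vqshl_s8_sketch() {
let a = vdup_n_s8(100);
// 100 << 1 = 200 overflows i8, so every lane saturates to i8::MAX.
assert_eq!(vget_lane_s8::<0>(vqshl_s8(a, vdup_n_s8(1))), i8::MAX);
// A negative count shifts right instead: 100 >> 2 = 25.
assert_eq!(vget_lane_s8::<0>(vqshl_s8(a, vdup_n_s8(-2))), 25);
}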
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.v8i8")]
fn vqshl_u8_(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
}
vqshl_u8_(a, b)
}
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.v16i8")]
fn vqshlq_u8_(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
}
vqshlq_u8_(a, b)
}
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.v4i16")]
fn vqshl_u16_(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
}
vqshl_u16_(a, b)
}
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.v8i16")]
fn vqshlq_u16_(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
}
vqshlq_u16_(a, b)
}
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.v2i32")]
fn vqshl_u32_(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
}
vqshl_u32_(a, b)
}
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.v4i32")]
fn vqshlq_u32_(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
}
vqshlq_u32_(a, b)
}
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v1i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.v1i64")]
fn vqshl_u64_(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
}
vqshl_u64_(a, b)
}
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftu.v2i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshl.v2i64")]
fn vqshlq_u64_(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
}
vqshlq_u64_(a, b)
}
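// Editor's sketch (not part of the generated file): the unsigned form takes
// an unsigned value but a *signed* per-lane shift count; left shifts saturate
// at the type maximum. The data values are assumptions.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
unsafe fn vqshl_u8_sketch() {
let a = vdup_n_u8(200);
// 200 << 1 = 400 overflows u8, so the lanes saturate to 255.
assert_eq!(vget_lane_u8::<0>(vqshl_u8(a, vdup_n_s8(1))), u8::MAX);
// 200 >> 3 = 25 via a negative shift count.
assert_eq!(vget_lane_u8::<0>(vqshl_u8(a, vdup_n_s8(-3))), 25);
}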
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
static_assert_imm3!(N);
vqshl_s8(a, vdup_n_s8(N as _))
}
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
static_assert_imm3!(N);
vqshlq_s8(a, vdupq_n_s8(N as _))
}
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
static_assert_imm4!(N);
vqshl_s16(a, vdup_n_s16(N as _))
}
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
static_assert_imm4!(N);
vqshlq_s16(a, vdupq_n_s16(N as _))
}
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
static_assert_imm5!(N);
vqshl_s32(a, vdup_n_s32(N as _))
}
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
static_assert_imm5!(N);
vqshlq_s32(a, vdupq_n_s32(N as _))
}
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
static_assert_imm6!(N);
vqshl_s64(a, vdup_n_s64(N as _))
}
/// Signed saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
static_assert_imm6!(N);
vqshlq_s64(a, vdupq_n_s64(N as _))
}
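// Editor's sketch (not part of the generated file): the `_n` forms splat the
// const generic N into a vector and delegate to the register forms above, as
// their bodies show. The input value is an assumption.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
unsafe fn vqshl_n_s16_sketch() {
let a = vdup_n_s16(5000);
// 5000 << 3 = 40000 overflows i16, so the result saturates to i16::MAX.
assert_eq!(vget_lane_s16::<0>(vqshl_n_s16::<3>(a)), i16::MAX);
}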
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
static_assert_imm3!(N);
vqshl_u8(a, vdup_n_s8(N as _))
}
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
static_assert_imm3!(N);
vqshlq_u8(a, vdupq_n_s8(N as _))
}
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
static_assert_imm4!(N);
vqshl_u16(a, vdup_n_s16(N as _))
}
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
static_assert_imm4!(N);
vqshlq_u16(a, vdupq_n_s16(N as _))
}
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
static_assert_imm5!(N);
vqshl_u32(a, vdup_n_s32(N as _))
}
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
static_assert_imm5!(N);
vqshlq_u32(a, vdupq_n_s32(N as _))
}
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
static_assert_imm6!(N);
vqshl_u64(a, vdup_n_s64(N as _))
}
/// Unsigned saturating shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
static_assert_imm6!(N);
vqshlq_u64(a, vdupq_n_s64(N as _))
}
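// Editor's sketch (not part of the generated file): unlike the register form,
// N here is a compile-time constant, checked by static_assert_imm3! to lie in
// 0..=7 for u8 lanes. The data values are assumptions.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
unsafe fn vqshl_n_u8_sketch() {
// 3 << 2 = 12 fits, while 100 << 2 = 400 saturates to u8::MAX.
assert_eq!(vget_lane_u8::<0>(vqshl_n_u8::<2>(vdup_n_u8(3))), 12);
assert_eq!(vget_lane_u8::<0>(vqshl_n_u8::<2>(vdup_n_u8(100))), u8::MAX);
}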
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t {
static_assert_imm3!(N);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")]
fn vqshlu_n_s8_(a: int8x8_t, n: int8x8_t) -> uint8x8_t;
}
vqshlu_n_s8_(a, int8x8_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8))
}
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t {
static_assert_imm3!(N);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v8i8")]
fn vqshlu_n_s8_(a: int8x8_t, n: int8x8_t) -> uint8x8_t;
}
vqshlu_n_s8_(a, int8x8_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8))
}
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshlu_n_s16<const N: i32>(a: int16x4_t) -> uint16x4_t {
static_assert_imm4!(N);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")]
fn vqshlu_n_s16_(a: int16x4_t, n: int16x4_t) -> uint16x4_t;
}
vqshlu_n_s16_(a, int16x4_t(N as i16, N as i16, N as i16, N as i16))
}
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlu_n_s16<const N: i32>(a: int16x4_t) -> uint16x4_t {
static_assert_imm4!(N);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v4i16")]
fn vqshlu_n_s16_(a: int16x4_t, n: int16x4_t) -> uint16x4_t;
}
vqshlu_n_s16_(a, int16x4_t(N as i16, N as i16, N as i16, N as i16))
}
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshlu_n_s32<const N: i32>(a: int32x2_t) -> uint32x2_t {
static_assert_imm5!(N);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")]
fn vqshlu_n_s32_(a: int32x2_t, n: int32x2_t) -> uint32x2_t;
}
vqshlu_n_s32_(a, int32x2_t(N as i32, N as i32))
}
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlu_n_s32<const N: i32>(a: int32x2_t) -> uint32x2_t {
static_assert_imm5!(N);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v2i32")]
fn vqshlu_n_s32_(a: int32x2_t, n: int32x2_t) -> uint32x2_t;
}
vqshlu_n_s32_(a, int32x2_t(N as i32, N as i32))
}
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshlu_n_s64<const N: i32>(a: int64x1_t) -> uint64x1_t {
static_assert_imm6!(N);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v1i64")]
fn vqshlu_n_s64_(a: int64x1_t, n: int64x1_t) -> uint64x1_t;
}
vqshlu_n_s64_(a, int64x1_t(N as i64))
}
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlu_n_s64<const N: i32>(a: int64x1_t) -> uint64x1_t {
static_assert_imm6!(N);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v1i64")]
fn vqshlu_n_s64_(a: int64x1_t, n: int64x1_t) -> uint64x1_t;
}
vqshlu_n_s64_(a, int64x1_t(N as i64))
}
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshluq_n_s8<const N: i32>(a: int8x16_t) -> uint8x16_t {
static_assert_imm3!(N);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")]
fn vqshluq_n_s8_(a: int8x16_t, n: int8x16_t) -> uint8x16_t;
}
vqshluq_n_s8_(a, int8x16_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8))
}
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshluq_n_s8<const N: i32>(a: int8x16_t) -> uint8x16_t {
static_assert_imm3!(N);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v16i8")]
fn vqshluq_n_s8_(a: int8x16_t, n: int8x16_t) -> uint8x16_t;
}
vqshluq_n_s8_(a, int8x16_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N as i8))
}
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshluq_n_s16<const N: i32>(a: int16x8_t) -> uint16x8_t {
static_assert_imm4!(N);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")]
fn vqshluq_n_s16_(a: int16x8_t, n: int16x8_t) -> uint16x8_t;
}
vqshluq_n_s16_(a, int16x8_t(N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16))
}
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshluq_n_s16<const N: i32>(a: int16x8_t) -> uint16x8_t {
static_assert_imm4!(N);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v8i16")]
fn vqshluq_n_s16_(a: int16x8_t, n: int16x8_t) -> uint16x8_t;
}
vqshluq_n_s16_(a, int16x8_t(N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16, N as i16))
}
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshluq_n_s32<const N: i32>(a: int32x4_t) -> uint32x4_t {
static_assert_imm5!(N);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")]
fn vqshluq_n_s32_(a: int32x4_t, n: int32x4_t) -> uint32x4_t;
}
vqshluq_n_s32_(a, int32x4_t(N as i32, N as i32, N as i32, N as i32))
}
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshluq_n_s32<const N: i32>(a: int32x4_t) -> uint32x4_t {
static_assert_imm5!(N);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v4i32")]
fn vqshluq_n_s32_(a: int32x4_t, n: int32x4_t) -> uint32x4_t;
}
vqshluq_n_s32_(a, int32x4_t(N as i32, N as i32, N as i32, N as i32))
}
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshluq_n_s64<const N: i32>(a: int64x2_t) -> uint64x2_t {
static_assert_imm6!(N);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")]
fn vqshluq_n_s64_(a: int64x2_t, n: int64x2_t) -> uint64x2_t;
}
vqshluq_n_s64_(a, int64x2_t(N as i64, N as i64))
}
/// Signed saturating shift left unsigned
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshluq_n_s64<const N: i32>(a: int64x2_t) -> uint64x2_t {
static_assert_imm6!(N);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v2i64")]
fn vqshluq_n_s64_(a: int64x2_t, n: int64x2_t) -> uint64x2_t;
}
vqshluq_n_s64_(a, int64x2_t(N as i64, N as i64))
}
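// Editor's sketch (not part of the generated file): vqshlu_n_s8 shifts a
// signed input left but produces an unsigned result, so negative lanes clamp
// to 0 while the positive range is twice as large. Inputs are assumptions.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
unsafe fn vqshlu_n_s8_sketch() {
// 100 << 1 = 200 still fits in u8 (the signed vqshl_n_s8 would saturate).
assert_eq!(vget_lane_u8::<0>(vqshlu_n_s8::<1>(vdup_n_s8(100))), 200);
// A negative input has no unsigned representation: saturate to 0.
assert_eq!(vget_lane_u8::<0>(vqshlu_n_s8::<1>(vdup_n_s8(-1))), 0);
}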
/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")]
fn vqshrn_n_s16_(a: int16x8_t, n: int16x8_t) -> int8x8_t;
}
vqshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16))
}
/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrn.v8i8")]
fn vqshrn_n_s16_(a: int16x8_t, n: i32) -> int8x8_t;
}
vqshrn_n_s16_(a, N)
}
/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")]
fn vqshrn_n_s32_(a: int32x4_t, n: int32x4_t) -> int16x4_t;
}
vqshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
}
/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrn.v4i16")]
fn vqshrn_n_s32_(a: int32x4_t, n: i32) -> int16x4_t;
}
vqshrn_n_s32_(a, N)
}
/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")]
fn vqshrn_n_s64_(a: int64x2_t, n: int64x2_t) -> int32x2_t;
}
vqshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64))
}
/// Signed saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrn.v2i32")]
fn vqshrn_n_s64_(a: int64x2_t, n: i32) -> int32x2_t;
}
vqshrn_n_s64_(a, N)
}
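// Editor's sketch (not part of the generated file): vqshrn_n_s16 truncates
// rather than rounds, in contrast to the vqrshrn_n_s16 rounding form earlier
// in this file. The lane value is an assumption.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
unsafe fn vqshrn_n_s16_sketch() {
let a = vdupq_n_s16(103);
// Truncating: 103 >> 2 = 25. The rounding form would give (103 + 2) >> 2 = 26.
assert_eq!(vget_lane_s8::<0>(vqshrn_n_s16::<2>(a)), 25);
}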
/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")]
fn vqshrn_n_u16_(a: uint16x8_t, n: uint16x8_t) -> uint8x8_t;
}
vqshrn_n_u16_(a, uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16, -N as u16))
}
/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshrn.v8i8")]
fn vqshrn_n_u16_(a: uint16x8_t, n: i32) -> uint8x8_t;
}
vqshrn_n_u16_(a, N)
}
/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")]
fn vqshrn_n_u32_(a: uint32x4_t, n: uint32x4_t) -> uint16x4_t;
}
vqshrn_n_u32_(a, uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32))
}
/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshrn.v4i16")]
fn vqshrn_n_u32_(a: uint32x4_t, n: i32) -> uint16x4_t;
}
vqshrn_n_u32_(a, N)
}
/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")]
fn vqshrn_n_u64_(a: uint64x2_t, n: uint64x2_t) -> uint32x2_t;
}
vqshrn_n_u64_(a, uint64x2_t(-N as u64, -N as u64))
}
/// Unsigned saturating shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshrn.v2i32")]
fn vqshrn_n_u64_(a: uint64x2_t, n: i32) -> uint32x2_t;
}
vqshrn_n_u64_(a, N)
}
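// Editor's sketch (not part of the generated file): the unsigned narrow
// saturates at the destination type's maximum. The input is an assumption.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
unsafe fn vqshrn_n_u32_sketch() {
// (1 << 20) >> 2 = 1 << 18 = 262_144, which exceeds u16::MAX and saturates.
let a = vdupq_n_u32(1 << 20);
assert_eq!(vget_lane_u16::<0>(vqshrn_n_u32::<2>(a)), u16::MAX);
}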
/// Signed saturating shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")]
fn vqshrun_n_s16_(a: int16x8_t, n: int16x8_t) -> uint8x8_t;
}
vqshrun_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16))
}
/// Signed saturating shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrun.v8i8")]
fn vqshrun_n_s16_(a: int16x8_t, n: i32) -> uint8x8_t;
}
vqshrun_n_s16_(a, N)
}
/// Signed saturating shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")]
fn vqshrun_n_s32_(a: int32x4_t, n: int32x4_t) -> uint16x4_t;
}
vqshrun_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
}
/// Signed saturating shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrun.v4i16")]
fn vqshrun_n_s32_(a: int32x4_t, n: i32) -> uint16x4_t;
}
vqshrun_n_s32_(a, N)
}
/// Signed saturating shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")]
fn vqshrun_n_s64_(a: int64x2_t, n: int64x2_t) -> uint32x2_t;
}
vqshrun_n_s64_(a, int64x2_t(-N as i64, -N as i64))
}
/// Signed saturating shift right unsigned narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrun.v2i32")]
fn vqshrun_n_s64_(a: int64x2_t, n: i32) -> uint32x2_t;
}
vqshrun_n_s64_(a, N)
}
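// Editor's sketch (not part of the generated file): vqshrun_n_s16 is the
// truncating cousin of vqrshrun_n_s16 above; negative lanes still clamp to 0.
// The inputs are assumptions.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
unsafe fn vqshrun_n_s16_sketch() {
assert_eq!(vget_lane_u8::<0>(vqshrun_n_s16::<2>(vdupq_n_s16(103))), 25);
assert_eq!(vget_lane_u8::<0>(vqshrun_n_s16::<2>(vdupq_n_s16(-103))), 0);
}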
/// Reciprocal square-root estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrte))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frsqrte.v2f32")]
fn vrsqrte_f32_(a: float32x2_t) -> float32x2_t;
}
vrsqrte_f32_(a)
}
/// Reciprocal square-root estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrte))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frsqrte.v4f32")]
fn vrsqrteq_f32_(a: float32x4_t) -> float32x4_t;
}
vrsqrteq_f32_(a)
}
/// Unsigned reciprocal square root estimate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursqrte))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ursqrte.v2i32")]
fn vrsqrte_u32_(a: uint32x2_t) -> uint32x2_t;
}
vrsqrte_u32_(a)
}
/// Unsigned reciprocal square root estimate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursqrte))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrte.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ursqrte.v4i32")]
fn vrsqrteq_u32_(a: uint32x4_t) -> uint32x4_t;
}
vrsqrteq_u32_(a)
}
/// Floating-point reciprocal square root step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrts))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frsqrts.v2f32")]
fn vrsqrts_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
}
vrsqrts_f32_(a, b)
}
/// Floating-point reciprocal square root step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrts))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsqrts.v4f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frsqrts.v4f32")]
fn vrsqrtsq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
}
vrsqrtsq_f32_(a, b)
}
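// Editor's sketch (not part of the generated file): the usual pattern pairs
// the low-precision estimate from vrsqrte_f32 with one or more
// vrsqrts_f32 Newton-Raphson steps, since vrsqrts_f32(a, b) computes
// (3 - a * b) / 2. The input and tolerance below are assumptions.
#[cfg(all(test, target_arch = "aarch64"))]
#[target_feature(enable = "neon")]
#[allow(dead_code)]
unsafe fn rsqrt_refine_sketch() {
let x = vdup_n_f32(0.25);
let mut e = vrsqrte_f32(x); // coarse 1/sqrt(x)
e = vmul_f32(e, vrsqrts_f32(vmul_f32(x, e), e)); // one refinement step
// 1/sqrt(0.25) = 2.0; after one step the estimate is close to it.
let y = vget_lane_f32::<0>(e);
assert!(y > 1.999 && y < 2.001);
}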
/// Reciprocal estimate.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecpe))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecpe.v2f32")]
fn vrecpe_f32_(a: float32x2_t) -> float32x2_t;
}
vrecpe_f32_(a)
}
/// Floating-point reciprocal estimate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecpe))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecpe.v4f32")]
fn vrecpeq_f32_(a: float32x4_t) -> float32x4_t;
}
vrecpeq_f32_(a)
}
/// Unsigned reciprocal estimate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urecpe))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urecpe.v2i32")]
fn vrecpe_u32_(a: uint32x2_t) -> uint32x2_t;
}
vrecpe_u32_(a)
}
/// Unsigned reciprocal estimate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urecpe))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecpe.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urecpe.v4i32")]
fn vrecpeq_u32_(a: uint32x4_t) -> uint32x4_t;
}
vrecpeq_u32_(a)
}
/// Floating-point reciprocal step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecps))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v2f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecps.v2f32")]
fn vrecps_f32_(a: float32x2_t, b: float32x2_t) -> float32x2_t;
}
vrecps_f32_(a, b)
}
/// Floating-point reciprocal step
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecps))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrecps.v4f32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.frecps.v4f32")]
fn vrecpsq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
}
vrecpsq_f32_(a, b)
}
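// Editor's sketch (not generated code): `vrecps_f32` computes (2 - a*b) per
// lane, so pairing it with the estimate from `vrecpe_f32` above yields one
// Newton-Raphson refinement of 1/x; the lane-wise multiply `vmul_f32` is
// assumed from elsewhere in this module.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
unsafe fn recip_one_refinement(x: float32x2_t) -> float32x2_t {
    let e = vrecpe_f32(x); // coarse estimate of 1/x
    // e' = e * (2 - x*e): one Newton-Raphson step toward 1/x
    vmul_f32(e, vrecps_f32(x, e))
}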
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t {
transmute(a)
}
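// Editor's sketch (not generated code): a reinterpret is a zero-cost bit cast
// (hence the `nop` assertion above), so eight lanes of 0xFF re-read as -1 when
// viewed as signed bytes. `vdup_n_u8` and `vget_lane_s8` are assumed from
// elsewhere in this crate.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
unsafe fn reinterpret_preserves_bits() -> i8 {
    let u = vdup_n_u8(0xFF); // eight lanes of 0xFF
    vget_lane_s8::<0>(vreinterpret_s8_u8(u)) // same bits, read back as -1
}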
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t {
transmute(a)
}
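// Editor's sketch (not generated code): from here the reinterprets also change
// lane count and width while preserving the full 64-bit payload, so a
// four-lane i16 vector re-reads as eight i8 lanes. Which byte of each i16
// lands in which i8 lane depends on endianness; the comment below assumes a
// little-endian target. `vdup_n_s16` and `vget_lane_s8` are assumed from
// elsewhere in this crate.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
unsafe fn reinterpret_changes_lane_count() -> i8 {
    let wide = vdup_n_s16(0x0102); // four lanes of 0x0102
    // little-endian: i8 lane 0 holds the low byte of i16 lane 0, i.e. 0x02
    vget_lane_s8::<0>(vreinterpret_s8_s16(wide))
}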
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t {
transmute(a)
}
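// Editor's sketch (not generated code): `p128` is a 128-bit polynomial scalar,
// so the reinterprets above merely split the same bits into two 64-bit lanes.
// `vgetq_lane_u64` is assumed from elsewhere in this crate; on a little-endian
// target the low 64 bits of the scalar land in lane 0.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
unsafe fn reinterpret_p128_low_half() -> u64 {
    let wide: p128 = 0x0123_4567_89AB_CDEF_0000_0000_0000_0001;
    vgetq_lane_u64::<0>(vreinterpretq_u64_p128(wide)) // low half: 1 (little-endian)
}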
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)
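///
/// # Examples
///
/// A minimal sketch (not part of the generated spec), assuming an
/// AArch64 target with NEON available:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     // Splat the byte 0x12 across all eight lanes.
///     let bytes: uint8x8_t = vdup_n_u8(0x12);
///     // Reinterpret the same 64 bits as four u16 lanes; the bits are
///     // reused verbatim, with no widening or value conversion.
///     let halves: uint16x4_t = vreinterpret_u16_u8(bytes);
///     // On a little-endian target each u16 lane now reads 0x1212.
///     assert_eq!(vget_lane_u16::<0>(halves), 0x1212);
/// }
/// ```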
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)
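///
/// # Examples
///
/// A minimal sketch (not part of the generated spec) of the 128-bit
/// variant, assuming an AArch64 target with NEON available:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     // Eight u16 lanes, each holding 0x00FF.
///     let halves: uint16x8_t = vdupq_n_u16(0x00FF);
///     // The same 128 bits viewed as four u32 lanes.
///     let words: uint32x4_t = vreinterpretq_u32_u16(halves);
///     // On a little-endian target each u32 lane pairs two adjacent
///     // u16 lanes, low lane first.
///     assert_eq!(vgetq_lane_u32::<0>(words), 0x00FF_00FF);
/// }
/// ```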
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)
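///
/// # Examples
///
/// A minimal sketch (not part of the generated spec), assuming an
/// AArch64 target built with both the `neon` and `aes` features, as
/// the attributes below require:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let words: uint32x2_t = vdup_n_u32(0xDEAD_BEEF);
///     // The two u32 lanes become the single 64-bit polynomial lane;
///     // on a little-endian target it reads 0xDEAD_BEEF_DEAD_BEEF.
///     let _poly: poly64x1_t = vreinterpret_p64_u32(words);
/// }
/// ```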
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)
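///
/// # Examples
///
/// A minimal sketch (not part of the generated spec), assuming an
/// AArch64 target built with the `neon` and `aes` features; `p128` is
/// an alias for `u128`, so the result can be compared directly:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let pair: uint64x2_t = vdupq_n_u64(1);
///     let wide: p128 = vreinterpretq_p128_u64(pair);
///     // On a little-endian target lane 0 supplies the low 64 bits.
///     assert_eq!(wide, (1u128 << 64) | 1);
/// }
/// ```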
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)
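///
/// # Examples
///
/// The cast also works in the narrowing direction: one lane simply
/// splits into several. A minimal sketch (not part of the generated
/// spec), assuming an AArch64 target with NEON available:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let words: int32x2_t = vdup_n_s32(0x0403_0201);
///     // The same 64 bits viewed as eight i8 lanes.
///     let bytes: int8x8_t = vreinterpret_s8_s32(words);
///     // On a little-endian target the low byte of each i32 comes
///     // first, so the lanes read 1, 2, 3, 4, 1, 2, 3, 4.
///     assert_eq!(vget_lane_s8::<0>(bytes), 1);
/// }
/// ```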
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)
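///
/// # Examples
///
/// An illustrative sketch (not part of the generated spec), assuming a
/// little-endian target; the bytes of 64-bit element 0 become lanes 0..=7:
///
/// ```ignore
/// unsafe {
///     let a = vcombine_s64(vcreate_s64(1), vcreate_s64(2));
///     let b: int8x16_t = vreinterpretq_s8_s64(a);
///     // Lane 0 holds 1, lane 8 holds 2, and every other lane is 0.
/// }
/// ```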
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)
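///
/// `poly64x1_t` is gated on the `aes` target feature, so this intrinsic
/// enables it in addition to `neon`.
///
/// # Examples
///
/// An illustrative sketch (not part of the generated spec), assuming a
/// little-endian target with the `neon` and `aes` features available:
///
/// ```ignore
/// unsafe {
///     let a = vcreate_p64(0x00ff_0000_0000_0001);
///     let b: int8x8_t = vreinterpret_s8_p64(a);
///     // Lane 0 holds 1 and lane 6 holds -1 (0xff); the bits are untouched.
/// }
/// ```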
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)
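///
/// `p128` is a plain 128-bit scalar, so this cast splits it into eight
/// 16-bit lanes.
///
/// # Examples
///
/// An illustrative sketch (not part of the generated spec), assuming a
/// little-endian target with the `neon` and `aes` features available:
///
/// ```ignore
/// unsafe {
///     let a: p128 = 0x0008_0007_0006_0005_0004_0003_0002_0001;
///     let b: int16x8_t = vreinterpretq_s16_p128(a);
///     // Lanes of `b` are [1, 2, 3, 4, 5, 6, 7, 8].
/// }
/// ```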
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)
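///
/// This exposes the raw IEEE 754 encoding of each lane, much as
/// `f32::to_bits` does for a scalar.
///
/// # Examples
///
/// An illustrative sketch (not part of the generated spec):
///
/// ```ignore
/// unsafe {
///     let a = vdup_n_f32(1.0);
///     let b: uint32x2_t = vreinterpret_u32_f32(a);
///     // Both lanes hold 0x3f80_0000, the bit pattern of 1.0f32.
/// }
/// ```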
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)
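///
/// This is the inverse direction: each 32-bit lane is reused as the IEEE 754
/// encoding of an `f32`, much as `f32::from_bits` does for a scalar.
///
/// # Examples
///
/// An illustrative sketch (not part of the generated spec):
///
/// ```ignore
/// unsafe {
///     let a = vdup_n_u32(0x3f80_0000);
///     let b: float32x2_t = vreinterpret_f32_u32(a);
///     // Both lanes hold 1.0f32.
/// }
/// ```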
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t {
transmute(a)
}
/// Vector reinterpret cast operation
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t {
transmute(a)
}
/// Signed rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s8)
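///
/// Each lane of `a` is shifted left by the signed value in the corresponding
/// lane of `b`; a negative shift amount selects a *rounding* shift right
/// instead.
///
/// # Examples
///
/// An illustrative sketch (not part of the generated spec):
///
/// ```ignore
/// unsafe {
///     let a = vdup_n_s8(5);
///     // Shift left by 2:
///     let l = vrshl_s8(a, vdup_n_s8(2));  // every lane: 20
///     // Rounding shift right by 1: (5 + 1) >> 1 = 3, where a plain
///     // shift would give 5 >> 1 = 2.
///     let r = vrshl_s8(a, vdup_n_s8(-1)); // every lane: 3
/// }
/// ```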
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.v8i8")]
fn vrshl_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t;
}
vrshl_s8_(a, b)
}
/// Signed rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.v16i8")]
fn vrshlq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t;
}
vrshlq_s8_(a, b)
}
/// Signed rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.v4i16")]
fn vrshl_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t;
}
vrshl_s16_(a, b)
}
/// Signed rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.v8i16")]
fn vrshlq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
}
vrshlq_s16_(a, b)
}
/// Signed rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.v2i32")]
fn vrshl_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t;
}
vrshl_s32_(a, b)
}
/// Signed rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.v4i32")]
fn vrshlq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
}
vrshlq_s32_(a, b)
}
/// Signed rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v1i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.v1i64")]
fn vrshl_s64_(a: int64x1_t, b: int64x1_t) -> int64x1_t;
}
vrshl_s64_(a, b)
}
/// Signed rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshifts.v2i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.srshl.v2i64")]
fn vrshlq_s64_(a: int64x2_t, b: int64x2_t) -> int64x2_t;
}
vrshlq_s64_(a, b)
}
/// Unsigned rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u8)
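///
/// The data lanes are unsigned, but the per-lane shift amounts in `b` remain
/// signed: negative amounts again select a rounding shift right.
///
/// # Examples
///
/// An illustrative sketch (not part of the generated spec):
///
/// ```ignore
/// unsafe {
///     let a = vdup_n_u8(7);
///     // Rounding shift right by 2: (7 + 2) >> 2 = 2, where a plain
///     // shift would give 7 >> 2 = 1.
///     let r = vrshl_u8(a, vdup_n_s8(-2));
/// }
/// ```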
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.v8i8")]
fn vrshl_u8_(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
}
vrshl_u8_(a, b)
}
/// Unsigned rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.v16i8")]
fn vrshlq_u8_(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
}
vrshlq_u8_(a, b)
}
/// Unsigned rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.v4i16")]
fn vrshl_u16_(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
}
vrshl_u16_(a, b)
}
/// Unsigned rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.v8i16")]
fn vrshlq_u16_(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
}
vrshlq_u16_(a, b)
}
/// Unsigned rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.v2i32")]
fn vrshl_u32_(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
}
vrshl_u32_(a, b)
}
/// Unsigned rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.v4i32")]
fn vrshlq_u32_(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
}
vrshlq_u32_(a, b)
}
/// Unsigned rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v1i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.v1i64")]
fn vrshl_u64_(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
}
vrshl_u64_(a, b)
}
/// Unsigned rounding shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftu.v2i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.urshl.v2i64")]
fn vrshlq_u64_(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
}
vrshlq_u64_(a, b)
}
/// Signed rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s8)
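///
/// # Examples
///
/// An illustrative sketch, not from the generated spec (`ignore`d; assumes
/// an AArch64 target). The rounding constant `1 << (N - 1)` is added before
/// the shift, so the result rounds to nearest instead of truncating.
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let a = vdup_n_s8(7);
///     let r = vrshr_n_s8::<2>(a); // (7 + 2) >> 2 == 2; a plain shift gives 1
///     assert_eq!(vget_lane_s8::<0>(r), 2);
/// }
/// ```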
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
vrshl_s8(a, vdup_n_s8((-N) as _))
}
/// Signed rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
vrshlq_s8(a, vdupq_n_s8((-N) as _))
}
/// Signed rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
vrshl_s16(a, vdup_n_s16((-N) as _))
}
/// Signed rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
vrshlq_s16(a, vdupq_n_s16((-N) as _))
}
/// Signed rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
vrshl_s32(a, vdup_n_s32((-N) as _))
}
/// Signed rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
vrshlq_s32(a, vdupq_n_s32((-N) as _))
}
/// Signed rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
static_assert!(N : i32 where N >= 1 && N <= 64);
vrshl_s64(a, vdup_n_s64((-N) as _))
}
/// Signed rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
static_assert!(N : i32 where N >= 1 && N <= 64);
vrshlq_s64(a, vdupq_n_s64((-N) as _))
}
/// Unsigned rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u8)
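///
/// # Examples
///
/// An illustrative sketch, not from the generated spec (`ignore`d; assumes
/// an AArch64 target):
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let a = vdup_n_u8(15);
///     let r = vrshr_n_u8::<3>(a); // (15 + 4) >> 3 == 2, i.e. 15/8 rounded
///     assert_eq!(vget_lane_u8::<0>(r), 2);
/// }
/// ```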
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
vrshl_u8(a, vdup_n_s8((-N) as _))
}
/// Unsigned rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
vrshlq_u8(a, vdupq_n_s8((-N) as _))
}
/// Unsigned rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
vrshl_u16(a, vdup_n_s16((-N) as _))
}
/// Unsigned rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
vrshlq_u16(a, vdupq_n_s16((-N) as _))
}
/// Unsigned rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
vrshl_u32(a, vdup_n_s32((-N) as _))
}
/// Unsigned rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
vrshlq_u32(a, vdupq_n_s32((-N) as _))
}
/// Unsigned rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
static_assert!(N : i32 where N >= 1 && N <= 64);
vrshl_u64(a, vdup_n_s64((-N) as _))
}
/// Unsigned rounding shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
static_assert!(N : i32 where N >= 1 && N <= 64);
vrshlq_u64(a, vdupq_n_s64((-N) as _))
}
/// Rounding shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")]
fn vrshrn_n_s16_(a: int16x8_t, n: int16x8_t) -> int8x8_t;
}
vrshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16, -N as i16))
}
/// Rounding shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)
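///
/// # Examples
///
/// An illustrative sketch, not from the generated spec (`ignore`d; assumes
/// an AArch64 target). The rounded result is narrowed by truncation, not
/// saturation, so choose `N` and the inputs so it fits the destination lane.
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let a = vdupq_n_s16(300);
///     let r = vrshrn_n_s16::<4>(a); // (300 + 8) >> 4 == 19, narrowed to i8
///     assert_eq!(vget_lane_s8::<0>(r), 19);
/// }
/// ```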
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rshrn.v8i8")]
fn vrshrn_n_s16_(a: int16x8_t, n: i32) -> int8x8_t;
}
vrshrn_n_s16_(a, N)
}
/// Rounding shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")]
fn vrshrn_n_s32_(a: int32x4_t, n: int32x4_t) -> int16x4_t;
}
vrshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
}
/// Rounding shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rshrn.v4i16")]
fn vrshrn_n_s32_(a: int32x4_t, n: i32) -> int16x4_t;
}
vrshrn_n_s32_(a, N)
}
/// Rounding shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)
#[inline]
#[cfg(target_arch = "arm")]
#[target_feature(enable = "neon,v7")]
#[cfg_attr(test, assert_instr(vrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")]
fn vrshrn_n_s64_(a: int64x2_t, n: int64x2_t) -> int32x2_t;
}
vrshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64))
}
/// Rounding shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)
#[inline]
#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rshrn.v2i32")]
fn vrshrn_n_s64_(a: int64x2_t, n: i32) -> int32x2_t;
}
vrshrn_n_s64_(a, N)
}
/// Rounding shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u16)
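///
/// # Examples
///
/// An illustrative sketch, not from the generated spec (`ignore`d; assumes
/// an AArch64 target). As the body below shows, the unsigned form is a
/// transmute around the signed narrowing shift:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let a = vdupq_n_u16(1000);
///     let r = vrshrn_n_u16::<4>(a); // (1000 + 8) >> 4 == 63, narrowed to u8
///     assert_eq!(vget_lane_u8::<0>(r), 63);
/// }
/// ```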
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
transmute(vrshrn_n_s16::<N>(transmute(a)))
}
/// Rounding shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
transmute(vrshrn_n_s32::<N>(transmute(a)))
}
/// Rounding shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
transmute(vrshrn_n_s64::<N>(transmute(a)))
}
/// Signed rounding shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s8)
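///
/// # Examples
///
/// An illustrative sketch, not from the generated spec (`ignore`d; assumes
/// an AArch64 target). `b` is rounding-shifted right by `N`, then
/// accumulated into `a`, a common step in fixed-point filters.
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let acc = vdup_n_s8(10);
///     let x = vdup_n_s8(7);
///     let r = vrsra_n_s8::<2>(acc, x); // 10 + ((7 + 2) >> 2) == 12
///     assert_eq!(vget_lane_s8::<0>(r), 12);
/// }
/// ```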
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsra_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
simd_add(a, vrshr_n_s8::<N>(b))
}
/// Signed rounding shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsraq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
simd_add(a, vrshrq_n_s8::<N>(b))
}
/// Signed rounding shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsra_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
simd_add(a, vrshr_n_s16::<N>(b))
}
/// Signed rounding shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsraq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
simd_add(a, vrshrq_n_s16::<N>(b))
}
/// Signed rounding shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsra_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
simd_add(a, vrshr_n_s32::<N>(b))
}
/// Signed rounding shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
simd_add(a, vrshrq_n_s32::<N>(b))
}
/// Signed rounding shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsra_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
static_assert!(N : i32 where N >= 1 && N <= 64);
simd_add(a, vrshr_n_s64::<N>(b))
}
/// Signed rounding shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsraq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
static_assert!(N : i32 where N >= 1 && N <= 64);
simd_add(a, vrshrq_n_s64::<N>(b))
}
/// Unsigned rounding shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u8)
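///
/// # Examples
///
/// An illustrative sketch, not from the generated spec (`ignore`d; assumes
/// an AArch64 target):
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let acc = vdup_n_u8(1);
///     let x = vdup_n_u8(15);
///     let r = vrsra_n_u8::<3>(acc, x); // 1 + ((15 + 4) >> 3) == 3
///     assert_eq!(vget_lane_u8::<0>(r), 3);
/// }
/// ```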
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsra_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
simd_add(a, vrshr_n_u8::<N>(b))
}
/// Unsigned rounding shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsraq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
simd_add(a, vrshrq_n_u8::<N>(b))
}
/// Unsigned rounding shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsra_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
simd_add(a, vrshr_n_u16::<N>(b))
}
/// Unsigned rounding shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsraq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
simd_add(a, vrshrq_n_u16::<N>(b))
}
/// Unsigned rounding shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsra_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
simd_add(a, vrshr_n_u32::<N>(b))
}
/// Unsigned rounding shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsraq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
simd_add(a, vrshrq_n_u32::<N>(b))
}
/// Unsigned rounding shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsra_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
static_assert!(N : i32 where N >= 1 && N <= 64);
simd_add(a, vrshr_n_u64::<N>(b))
}
/// Unsigned rounding shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsraq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
static_assert!(N : i32 where N >= 1 && N <= 64);
simd_add(a, vrshrq_n_u64::<N>(b))
}
/// Rounding subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s16)
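///
/// # Examples
///
/// An illustrative sketch, not from the generated spec (`ignore`d; assumes
/// an AArch64 target). The difference is rounded by adding `1 << 7` (for
/// 16-bit lanes) before the high byte of each lane is kept.
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let a = vdupq_n_s16(0x1234);
///     let b = vdupq_n_s16(0x0034);
///     let r = vrsubhn_s16(a, b); // (0x1200 + 0x80) >> 8 == 0x12
///     assert_eq!(vget_lane_s8::<0>(r), 0x12);
/// }
/// ```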
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rsubhn.v8i8")]
fn vrsubhn_s16_(a: int16x8_t, b: int16x8_t) -> int8x8_t;
}
vrsubhn_s16_(a, b)
}
/// Rounding subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rsubhn.v4i16")]
fn vrsubhn_s32_(a: int32x4_t, b: int32x4_t) -> int16x4_t;
}
vrsubhn_s32_(a, b)
}
/// Rounding subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrsubhn.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rsubhn.v2i32")]
fn vrsubhn_s64_(a: int64x2_t, b: int64x2_t) -> int32x2_t;
}
vrsubhn_s64_(a, b)
}
/// Rounding subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t {
transmute(vrsubhn_s16(transmute(a), transmute(b)))
}
/// Rounding subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t {
transmute(vrsubhn_s32(transmute(a), transmute(b)))
}
/// Rounding subtract returning high narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
transmute(vrsubhn_s64(transmute(a), transmute(b)))
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s8)
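///
/// # Examples
///
/// An illustrative sketch, not from the generated spec (`ignore`d; assumes
/// an AArch64 target). `LANE` is a const generic, so an out-of-range index
/// is rejected at compile time.
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let v = vdup_n_s8(0);
///     let r = vset_lane_s8::<3>(42, v); // write lane 3, keep the others
///     assert_eq!(vget_lane_s8::<3>(r), 42);
///     assert_eq!(vget_lane_s8::<0>(r), 0);
/// }
/// ```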
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_s8<const LANE: i32>(a: i8, b: int8x8_t) -> int8x8_t {
static_assert_imm3!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> int16x4_t {
static_assert_imm2!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> int32x2_t {
static_assert_imm1!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_s64<const LANE: i32>(a: i64, b: int64x1_t) -> int64x1_t {
static_assert!(LANE : i32 where LANE == 0);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_u8<const LANE: i32>(a: u8, b: uint8x8_t) -> uint8x8_t {
static_assert_imm3!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_u16<const LANE: i32>(a: u16, b: uint16x4_t) -> uint16x4_t {
static_assert_imm2!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_u32<const LANE: i32>(a: u32, b: uint32x2_t) -> uint32x2_t {
static_assert_imm1!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_u64<const LANE: i32>(a: u64, b: uint64x1_t) -> uint64x1_t {
static_assert!(LANE : i32 where LANE == 0);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_p8<const LANE: i32>(a: p8, b: poly8x8_t) -> poly8x8_t {
static_assert_imm3!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_p16<const LANE: i32>(a: p16, b: poly16x4_t) -> poly16x4_t {
static_assert_imm2!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_p64<const LANE: i32>(a: p64, b: poly64x1_t) -> poly64x1_t {
static_assert!(LANE : i32 where LANE == 0);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s8)
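///
/// # Examples
///
/// An illustrative sketch, not from the generated spec (`ignore`d; assumes
/// an AArch64 target); the `q` form addresses all 16 lanes:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let v = vdupq_n_s8(0);
///     let r = vsetq_lane_s8::<15>(7, v);
///     assert_eq!(vgetq_lane_s8::<15>(r), 7);
/// }
/// ```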
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_s8<const LANE: i32>(a: i8, b: int8x16_t) -> int8x16_t {
static_assert_imm4!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_s16<const LANE: i32>(a: i16, b: int16x8_t) -> int16x8_t {
static_assert_imm3!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_s32<const LANE: i32>(a: i32, b: int32x4_t) -> int32x4_t {
static_assert_imm2!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_s64<const LANE: i32>(a: i64, b: int64x2_t) -> int64x2_t {
static_assert_imm1!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_u8<const LANE: i32>(a: u8, b: uint8x16_t) -> uint8x16_t {
static_assert_imm4!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_u16<const LANE: i32>(a: u16, b: uint16x8_t) -> uint16x8_t {
static_assert_imm3!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_u32<const LANE: i32>(a: u32, b: uint32x4_t) -> uint32x4_t {
static_assert_imm2!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_u64<const LANE: i32>(a: u64, b: uint64x2_t) -> uint64x2_t {
static_assert_imm1!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_p8<const LANE: i32>(a: p8, b: poly8x16_t) -> poly8x16_t {
static_assert_imm4!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_p16<const LANE: i32>(a: p16, b: poly16x8_t) -> poly16x8_t {
static_assert_imm3!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_p64<const LANE: i32>(a: p64, b: poly64x2_t) -> poly64x2_t {
static_assert_imm1!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f32)
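///
/// # Examples
///
/// An illustrative sketch, not from the generated spec (`ignore`d; assumes
/// an AArch64 target):
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let v = vdup_n_f32(0.0);
///     let r = vset_lane_f32::<1>(1.5, v);
///     assert_eq!(vget_lane_f32::<1>(r), 1.5);
/// }
/// ```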
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> float32x2_t {
static_assert_imm1!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Insert vector element from another vector element
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_f32<const LANE: i32>(a: f32, b: float32x4_t) -> float32x4_t {
static_assert_imm2!(LANE);
simd_insert(b, LANE as u32, a)
}
/// Signed shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s8)
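///
/// # Examples
///
/// An illustrative sketch, not from the generated spec (`ignore`d; assumes
/// an AArch64 target). Unlike `vrshl_s8`, a negative count here is a plain
/// truncating right shift:
///
/// ```ignore
/// use core::arch::aarch64::*;
///
/// unsafe {
///     let r = vshl_s8(vdup_n_s8(1), vdup_n_s8(4)); // 1 << 4 == 16
///     assert_eq!(vget_lane_s8::<0>(r), 16);
///     let t = vshl_s8(vdup_n_s8(7), vdup_n_s8(-2)); // 7 >> 2 == 1, no rounding
///     assert_eq!(vget_lane_s8::<0>(t), 1);
/// }
/// ```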
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sshl.v8i8")]
fn vshl_s8_(a: int8x8_t, b: int8x8_t) -> int8x8_t;
}
vshl_s8_(a, b)
}
/// Signed shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sshl.v16i8")]
fn vshlq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t;
}
vshlq_s8_(a, b)
}
/// Signed shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sshl.v4i16")]
fn vshl_s16_(a: int16x4_t, b: int16x4_t) -> int16x4_t;
}
vshl_s16_(a, b)
}
/// Signed shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sshl.v8i16")]
fn vshlq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
}
vshlq_s16_(a, b)
}
/// Signed shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sshl.v2i32")]
fn vshl_s32_(a: int32x2_t, b: int32x2_t) -> int32x2_t;
}
vshl_s32_(a, b)
}
/// Signed shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sshl.v4i32")]
fn vshlq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
}
vshlq_s32_(a, b)
}
/// Signed shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v1i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sshl.v1i64")]
fn vshl_s64_(a: int64x1_t, b: int64x1_t) -> int64x1_t;
}
vshl_s64_(a, b)
}
/// Signed shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshifts.v2i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sshl.v2i64")]
fn vshlq_s64_(a: int64x2_t, b: int64x2_t) -> int64x2_t;
}
vshlq_s64_(a, b)
}
/// Unsigned shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ushl.v8i8")]
fn vshl_u8_(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
}
vshl_u8_(a, b)
}
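// Illustrative usage sketch (editorial addition; values are arbitrary). The
// data lanes are unsigned but the per-lane shift amounts in `b` stay signed;
// per Arm's USHL description, a negative amount performs a logical right shift.
#[cfg(test)]
mod example_vshl_u8 {
    use super::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn lanewise_shift() {
        let a: uint8x8_t = transmute([64u8, 255, 1, 2, 4, 8, 16, 32]);
        let b: int8x8_t = transmute([-2i8, -4, 1, 1, 1, 1, 1, 1]);
        // 64 >> 2 == 16 and 255 >> 4 == 15; the remaining lanes shift left.
        let r: [u8; 8] = transmute(vshl_u8(a, b));
        assert_eq!(r, [16, 15, 2, 4, 8, 16, 32, 64]);
    }
}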
/// Unsigned shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ushl.v16i8")]
fn vshlq_u8_(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
}
vshlq_u8_(a, b)
}
/// Unsigned shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ushl.v4i16")]
fn vshl_u16_(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
}
vshl_u16_(a, b)
}
/// Unsigned shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ushl.v8i16")]
fn vshlq_u16_(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
}
vshlq_u16_(a, b)
}
/// Unsigned shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ushl.v2i32")]
fn vshl_u32_(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
}
vshl_u32_(a, b)
}
/// Unsigned shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ushl.v4i32")]
fn vshlq_u32_(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
}
vshlq_u32_(a, b)
}
/// Unsigned shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v1i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ushl.v1i64")]
fn vshl_u64_(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
}
vshl_u64_(a, b)
}
/// Unsigned shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vshiftu.v2i64")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ushl.v2i64")]
fn vshlq_u64_(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
}
vshlq_u64_(a, b)
}
/// Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
static_assert_imm3!(N);
simd_shl(a, vdup_n_s8(N as _))
}
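// Illustrative usage sketch (editorial addition; values are arbitrary). The
// shift amount is a const generic checked at compile time by
// `static_assert_imm3!`, so `N` must fit in 3 bits (0..=7).
#[cfg(test)]
mod example_vshl_n_s8 {
    use super::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn shift_by_const() {
        let a: int8x8_t = transmute([1i8, 2, 3, 4, 5, 6, 7, 8]);
        // Every lane is shifted left by the same constant, here 2.
        let r: [i8; 8] = transmute(vshl_n_s8::<2>(a));
        assert_eq!(r, [4, 8, 12, 16, 20, 24, 28, 32]);
    }
}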
/// Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
static_assert_imm3!(N);
simd_shl(a, vdupq_n_s8(N as _))
}
/// Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
static_assert_imm4!(N);
simd_shl(a, vdup_n_s16(N as _))
}
/// Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
static_assert_imm4!(N);
simd_shl(a, vdupq_n_s16(N as _))
}
/// Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
static_assert_imm5!(N);
simd_shl(a, vdup_n_s32(N as _))
}
/// Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
static_assert_imm5!(N);
simd_shl(a, vdupq_n_s32(N as _))
}
/// Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
static_assert_imm3!(N);
simd_shl(a, vdup_n_u8(N as _))
}
/// Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
static_assert_imm3!(N);
simd_shl(a, vdupq_n_u8(N as _))
}
/// Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
static_assert_imm4!(N);
simd_shl(a, vdup_n_u16(N as _))
}
/// Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
static_assert_imm4!(N);
simd_shl(a, vdupq_n_u16(N as _))
}
/// Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
static_assert_imm5!(N);
simd_shl(a, vdup_n_u32(N as _))
}
/// Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
static_assert_imm5!(N);
simd_shl(a, vdupq_n_u32(N as _))
}
/// Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
static_assert_imm6!(N);
simd_shl(a, vdup_n_s64(N as _))
}
/// Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
static_assert_imm6!(N);
simd_shl(a, vdupq_n_s64(N as _))
}
/// Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
static_assert_imm6!(N);
simd_shl(a, vdup_n_u64(N as _))
}
/// Shift left
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
static_assert_imm6!(N);
simd_shl(a, vdupq_n_u64(N as _))
}
/// Signed shift left long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s8", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshll, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshll_n_s8<const N: i32>(a: int8x8_t) -> int16x8_t {
static_assert!(N : i32 where N >= 0 && N <= 8);
simd_shl(simd_cast(a), vdupq_n_s16(N as _))
}
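// Illustrative usage sketch (editorial addition; values are arbitrary). The
// "long" variant first widens each i8 lane to i16 (the `simd_cast` above) and
// only then shifts, so no bits are lost even for N as large as 8.
#[cfg(test)]
mod example_vshll_n_s8 {
    use super::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn widen_then_shift() {
        let a: int8x8_t = transmute([1i8, -1, 2, -2, 3, -3, 4, -4]);
        // 1 << 8 == 256 fits because lanes are widened to i16 first.
        let r: [i16; 8] = transmute(vshll_n_s8::<8>(a));
        assert_eq!(r, [256, -256, 512, -512, 768, -768, 1024, -1024]);
    }
}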
/// Signed shift left long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshll, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshll_n_s16<const N: i32>(a: int16x4_t) -> int32x4_t {
static_assert!(N : i32 where N >= 0 && N <= 16);
simd_shl(simd_cast(a), vdupq_n_s32(N as _))
}
/// Signed shift left long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshll, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshll_n_s32<const N: i32>(a: int32x2_t) -> int64x2_t {
static_assert!(N : i32 where N >= 0 && N <= 32);
simd_shl(simd_cast(a), vdupq_n_s64(N as _))
}
/// Unsigned shift left long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u8", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushll, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshll_n_u8<const N: i32>(a: uint8x8_t) -> uint16x8_t {
static_assert!(N : i32 where N >= 0 && N <= 8);
simd_shl(simd_cast(a), vdupq_n_u16(N as _))
}
/// Unsigned shift left long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushll, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshll_n_u16<const N: i32>(a: uint16x4_t) -> uint32x4_t {
static_assert!(N : i32 where N >= 0 && N <= 16);
simd_shl(simd_cast(a), vdupq_n_u32(N as _))
}
/// Unsigned shift left long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushll, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshll_n_u32<const N: i32>(a: uint32x2_t) -> uint64x2_t {
static_assert!(N : i32 where N >= 0 && N <= 32);
simd_shl(simd_cast(a), vdupq_n_u64(N as _))
}
/// Shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
let n: i32 = if N == 8 { 7 } else { N };
simd_shr(a, vdup_n_s8(n as _))
}
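// Illustrative usage sketch (editorial addition; values are arbitrary). This
// is an arithmetic shift, so the sign bit is replicated. Note the clamp above:
// N == 8 is rewritten to 7, which gives the same result as a full-width
// arithmetic shift (0 for non-negative lanes, -1 for negative ones) while
// avoiding an out-of-range shift in the IR.
#[cfg(test)]
mod example_vshr_n_s8 {
    use super::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn arithmetic_shift_right() {
        let a: int8x8_t = transmute([-8i8, 8, -1, 1, 100, -100, 0, 127]);
        let r: [i8; 8] = transmute(vshr_n_s8::<2>(a));
        assert_eq!(r, [-2, 2, -1, 0, 25, -25, 0, 31]);
    }
}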
/// Shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
let n: i32 = if N == 8 { 7 } else { N };
simd_shr(a, vdupq_n_s8(n as _))
}
/// Shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
let n: i32 = if N == 16 { 15 } else { N };
simd_shr(a, vdup_n_s16(n as _))
}
/// Shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
let n: i32 = if N == 16 { 15 } else { N };
simd_shr(a, vdupq_n_s16(n as _))
}
/// Shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
let n: i32 = if N == 32 { 31 } else { N };
simd_shr(a, vdup_n_s32(n as _))
}
/// Shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
let n: i32 = if N == 32 { 31 } else { N };
simd_shr(a, vdupq_n_s32(n as _))
}
/// Shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
static_assert!(N : i32 where N >= 1 && N <= 64);
let n: i32 = if N == 64 { 63 } else { N };
simd_shr(a, vdup_n_s64(n as _))
}
/// Shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
static_assert!(N : i32 where N >= 1 && N <= 64);
let n: i32 = if N == 64 { 63 } else { N };
simd_shr(a, vdupq_n_s64(n as _))
}
/// Shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
let n: i32 = if N == 8 { return vdup_n_u8(0); } else { N };
simd_shr(a, vdup_n_u8(n as _))
}
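// Illustrative usage sketch (editorial addition; values are arbitrary). The
// unsigned variant is a logical shift, and the early `return` above handles
// N == 8 by producing all-zero lanes, since a full-width logical shift clears
// every bit.
#[cfg(test)]
mod example_vshr_n_u8 {
    use super::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn logical_shift_right() {
        let a: uint8x8_t = transmute([255u8, 128, 64, 3, 4, 8, 16, 200]);
        let r: [u8; 8] = transmute(vshr_n_u8::<2>(a));
        assert_eq!(r, [63, 32, 16, 0, 1, 2, 4, 50]);
    }
}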
/// Shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
let n: i32 = if N == 8 { return vdupq_n_u8(0); } else { N };
simd_shr(a, vdupq_n_u8(n as _))
}
/// Shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
let n: i32 = if N == 16 { return vdup_n_u16(0); } else { N };
simd_shr(a, vdup_n_u16(n as _))
}
/// Shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
let n: i32 = if N == 16 { return vdupq_n_u16(0); } else { N };
simd_shr(a, vdupq_n_u16(n as _))
}
/// Shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
let n: i32 = if N == 32 { return vdup_n_u32(0); } else { N };
simd_shr(a, vdup_n_u32(n as _))
}
/// Shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
let n: i32 = if N == 32 { return vdupq_n_u32(0); } else { N };
simd_shr(a, vdupq_n_u32(n as _))
}
/// Shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
static_assert!(N : i32 where N >= 1 && N <= 64);
let n: i32 = if N == 64 { return vdup_n_u64(0); } else { N };
simd_shr(a, vdup_n_u64(n as _))
}
/// Shift right
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
static_assert!(N : i32 where N >= 1 && N <= 64);
let n: i32 = if N == 64 { return vdupq_n_u64(0); } else { N };
simd_shr(a, vdupq_n_u64(n as _))
}
/// Shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
simd_cast(simd_shr(a, vdupq_n_s16(N as _)))
}
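// Illustrative usage sketch (editorial addition; values are arbitrary). Each
// i16 lane is shifted right and then truncated to i8 by the `simd_cast`, so
// the result halves the element width; high bits that do not fit are
// discarded.
#[cfg(test)]
mod example_vshrn_n_s16 {
    use super::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn shift_and_narrow() {
        let a: int16x8_t = transmute([16i16, 32, 48, 64, -16, -32, -48, -64]);
        let r: [i8; 8] = transmute(vshrn_n_s16::<4>(a));
        assert_eq!(r, [1, 2, 3, 4, -1, -2, -3, -4]);
    }
}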
/// Shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
simd_cast(simd_shr(a, vdupq_n_s32(N as _)))
}
/// Shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
simd_cast(simd_shr(a, vdupq_n_s64(N as _)))
}
/// Shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
simd_cast(simd_shr(a, vdupq_n_u16(N as _)))
}
/// Shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
simd_cast(simd_shr(a, vdupq_n_u32(N as _)))
}
/// Shift right narrow
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
simd_cast(simd_shr(a, vdupq_n_u64(N as _)))
}
/// Signed shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
simd_add(a, vshr_n_s8::<N>(b))
}
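// Illustrative usage sketch (editorial addition; values are arbitrary). As the
// body above shows, this composes `vshr_n_s8::<N>` with a lanewise add:
// r[i] = a[i] + (b[i] >> N).
#[cfg(test)]
mod example_vsra_n_s8 {
    use super::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn shift_right_accumulate() {
        let a: int8x8_t = transmute([1i8, 1, 1, 1, 1, 1, 1, 1]);
        let b: int8x8_t = transmute([8i8, 16, 24, 32, -8, -16, -24, -32]);
        // b >> 1 is [4, 8, 12, 16, -4, -8, -12, -16]; add a to each lane.
        let r: [i8; 8] = transmute(vsra_n_s8::<1>(a, b));
        assert_eq!(r, [5, 9, 13, 17, -3, -7, -11, -15]);
    }
}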
/// Signed shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
simd_add(a, vshrq_n_s8::<N>(b))
}
/// Signed shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
simd_add(a, vshr_n_s16::<N>(b))
}
/// Signed shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
simd_add(a, vshrq_n_s16::<N>(b))
}
/// Signed shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
simd_add(a, vshr_n_s32::<N>(b))
}
/// Signed shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
simd_add(a, vshrq_n_s32::<N>(b))
}
/// Signed shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
static_assert!(N : i32 where N >= 1 && N <= 64);
simd_add(a, vshr_n_s64::<N>(b))
}
/// Signed shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
static_assert!(N : i32 where N >= 1 && N <= 64);
simd_add(a, vshrq_n_s64::<N>(b))
}
/// Unsigned shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
simd_add(a, vshr_n_u8::<N>(b))
}
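// Illustrative usage sketch (editorial addition; values are arbitrary). Same
// shape as the signed form, but the embedded shift is logical:
// r[i] = a[i] + (b[i] >> N) with zero-fill.
#[cfg(test)]
mod example_vsra_n_u8 {
    use super::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn shift_right_accumulate() {
        let a: uint8x8_t = transmute([1u8, 1, 1, 1, 1, 1, 1, 1]);
        let b: uint8x8_t = transmute([4u8, 8, 12, 16, 20, 24, 28, 32]);
        // b >> 2 is [1, 2, 3, 4, 5, 6, 7, 8]; add a to each lane.
        let r: [u8; 8] = transmute(vsra_n_u8::<2>(a, b));
        assert_eq!(r, [2, 3, 4, 5, 6, 7, 8, 9]);
    }
}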
/// Unsigned shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
static_assert!(N : i32 where N >= 1 && N <= 8);
simd_add(a, vshrq_n_u8::<N>(b))
}
/// Unsigned shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
simd_add(a, vshr_n_u16::<N>(b))
}
/// Unsigned shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
static_assert!(N : i32 where N >= 1 && N <= 16);
simd_add(a, vshrq_n_u16::<N>(b))
}
/// Unsigned shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
simd_add(a, vshr_n_u32::<N>(b))
}
/// Unsigned shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
static_assert!(N : i32 where N >= 1 && N <= 32);
simd_add(a, vshrq_n_u32::<N>(b))
}
/// Unsigned shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
static_assert!(N : i32 where N >= 1 && N <= 64);
simd_add(a, vshr_n_u64::<N>(b))
}
/// Unsigned shift right and accumulate
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
static_assert!(N : i32 where N >= 1 && N <= 64);
simd_add(a, vshrq_n_u64::<N>(b))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t {
let a1: int8x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
let b1: int8x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
transmute((a1, b1))
}
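// Illustrative usage sketch (editorial addition; values are arbitrary). The
// two shuffles above interleave the even-indexed and odd-indexed lanes of `a`
// and `b`, i.e. they transpose the pair of vectors viewed as 2x2 blocks of
// elements.
#[cfg(test)]
mod example_vtrn_s8 {
    use super::*;
    use std::mem::transmute;
    use stdarch_test::simd_test;

    #[simd_test(enable = "neon")]
    unsafe fn transpose_pairs() {
        let a: int8x8_t = transmute([0i8, 2, 4, 6, 8, 10, 12, 14]);
        let b: int8x8_t = transmute([1i8, 3, 5, 7, 9, 11, 13, 15]);
        let r = vtrn_s8(a, b);
        let r0: [i8; 8] = transmute(r.0);
        let r1: [i8; 8] = transmute(r.1);
        // Even-indexed lanes of a/b land in the first vector, odd-indexed
        // lanes in the second.
        assert_eq!(r0, [0, 1, 4, 5, 8, 9, 12, 13]);
        assert_eq!(r1, [2, 3, 6, 7, 10, 11, 14, 15]);
    }
}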
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t {
let a1: int16x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]);
let b1: int16x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]);
transmute((a1, b1))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t {
let a1: int8x16_t = simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]);
let b1: int8x16_t = simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]);
transmute((a1, b1))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t {
let a1: int16x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
let b1: int16x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
transmute((a1, b1))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t {
let a1: int32x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]);
let b1: int32x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]);
transmute((a1, b1))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t {
let a1: uint8x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
let b1: uint8x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
transmute((a1, b1))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t {
let a1: uint16x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]);
let b1: uint16x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]);
transmute((a1, b1))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
let a1: uint8x16_t = simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]);
let b1: uint8x16_t = simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]);
transmute((a1, b1))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
let a1: uint16x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
let b1: uint16x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
transmute((a1, b1))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
let a1: uint32x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]);
let b1: uint32x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]);
transmute((a1, b1))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t {
let a1: poly8x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
let b1: poly8x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
transmute((a1, b1))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t {
let a1: poly16x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]);
let b1: poly16x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]);
transmute((a1, b1))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t {
let a1: poly8x16_t = simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]);
let b1: poly8x16_t = simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]);
transmute((a1, b1))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t {
let a1: poly16x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
let b1: poly16x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
transmute((a1, b1))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s32)
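///
/// For two-lane vectors a transpose and an interleave coincide, which is
/// presumably why the AArch64 `assert_instr` below expects `zip` rather
/// than `trn`. A brief sketch (editorial addition, not run as a doctest):
///
/// ```ignore
/// unsafe {
///     let r = vtrn_s32(vld1_s32([0i32, 1].as_ptr()), vld1_s32([2i32, 3].as_ptr()));
///     // r.0 == [0, 2], r.1 == [1, 3]
/// }
/// ```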
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t {
let a1: int32x2_t = simd_shuffle2!(a, b, [0, 2]);
let b1: int32x2_t = simd_shuffle2!(a, b, [1, 3]);
transmute((a1, b1))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t {
let a1: uint32x2_t = simd_shuffle2!(a, b, [0, 2]);
let b1: uint32x2_t = simd_shuffle2!(a, b, [1, 3]);
transmute((a1, b1))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
let a1: float32x2_t = simd_shuffle2!(a, b, [0, 2]);
let b1: float32x2_t = simd_shuffle2!(a, b, [1, 3]);
transmute((a1, b1))
}
/// Transpose elements
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f32)
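///
/// A concrete illustration of the two shuffles in the body below
/// (editorial addition, not run as a doctest):
///
/// ```ignore
/// unsafe {
///     let a = vld1q_f32([0.0f32, 1.0, 2.0, 3.0].as_ptr());
///     let b = vld1q_f32([4.0f32, 5.0, 6.0, 7.0].as_ptr());
///     let r = vtrnq_f32(a, b);
///     // r.0 == [0.0, 4.0, 2.0, 6.0]
///     // r.1 == [1.0, 5.0, 3.0, 7.0]
/// }
/// ```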
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t {
let a1: float32x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]);
let b1: float32x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]);
transmute((a1, b1))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s8)
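///
/// A usage sketch (editorial addition, not run as a doctest): reading
/// `r.0` followed by `r.1` gives the fully interleaved sequence.
///
/// ```ignore
/// unsafe {
///     let a = vld1_s8([0i8, 1, 2, 3, 4, 5, 6, 7].as_ptr());
///     let b = vld1_s8([8i8, 9, 10, 11, 12, 13, 14, 15].as_ptr());
///     let r = vzip_s8(a, b);
///     // r.0 == [0, 8, 1, 9, 2, 10, 3, 11]   (low halves interleaved)
///     // r.1 == [4, 12, 5, 13, 6, 14, 7, 15] (high halves interleaved)
/// }
/// ```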
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t {
let a0: int8x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
let b0: int8x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t {
let a0: int16x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]);
let b0: int16x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t {
let a0: uint8x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
let b0: uint8x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t {
let a0: uint16x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]);
let b0: uint16x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t {
let a0: poly8x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
let b0: poly8x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t {
let a0: poly16x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]);
let b0: poly16x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t {
let a0: int32x2_t = simd_shuffle2!(a, b, [0, 2]);
let b0: int32x2_t = simd_shuffle2!(a, b, [1, 3]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t {
let a0: uint32x2_t = simd_shuffle2!(a, b, [0, 2]);
let b0: uint32x2_t = simd_shuffle2!(a, b, [1, 3]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t {
let a0: int8x16_t = simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]);
let b0: int8x16_t = simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t {
let a0: int16x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
let b0: int16x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t {
let a0: int32x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]);
let b0: int32x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
let a0: uint8x16_t = simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]);
let b0: uint8x16_t = simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
let a0: uint16x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
let b0: uint16x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
let a0: uint32x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]);
let b0: uint32x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t {
let a0: poly8x16_t = simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]);
let b0: poly8x16_t = simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t {
let a0: poly16x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
let b0: poly16x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
let a0: float32x2_t = simd_shuffle2!(a, b, [0, 2]);
let b0: float32x2_t = simd_shuffle2!(a, b, [1, 3]);
transmute((a0, b0))
}
/// Zip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f32)
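///
/// A short sketch (editorial addition, not run as a doctest):
///
/// ```ignore
/// unsafe {
///     let a = vld1q_f32([0.0f32, 1.0, 2.0, 3.0].as_ptr());
///     let b = vld1q_f32([4.0f32, 5.0, 6.0, 7.0].as_ptr());
///     let r = vzipq_f32(a, b);
///     // r.0 == [0.0, 4.0, 1.0, 5.0]
///     // r.1 == [2.0, 6.0, 3.0, 7.0]
/// }
/// ```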
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t {
let a0: float32x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]);
let b0: float32x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s8)
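///
/// Unzipping inverts a zip: even-indexed elements of the concatenated
/// inputs land in the first result, odd-indexed elements in the second.
/// A sketch (editorial addition, not run as a doctest):
///
/// ```ignore
/// unsafe {
///     let a = vld1_s8([0i8, 8, 1, 9, 2, 10, 3, 11].as_ptr());
///     let b = vld1_s8([4i8, 12, 5, 13, 6, 14, 7, 15].as_ptr());
///     let r = vuzp_s8(a, b);
///     // r.0 == [0, 1, 2, 3, 4, 5, 6, 7]
///     // r.1 == [8, 9, 10, 11, 12, 13, 14, 15]
/// }
/// ```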
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t {
let a0: int8x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
let b0: int8x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t {
let a0: int16x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]);
let b0: int16x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t {
let a0: int8x16_t = simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]);
let b0: int8x16_t = simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t {
let a0: int16x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
let b0: int16x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t {
let a0: int32x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]);
let b0: int32x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t {
let a0: uint8x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
let b0: uint8x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t {
let a0: uint16x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]);
let b0: uint16x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
let a0: uint8x16_t = simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]);
let b0: uint8x16_t = simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
let a0: uint16x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
let b0: uint16x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
let a0: uint32x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]);
let b0: uint32x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t {
let a0: poly8x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
let b0: poly8x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t {
let a0: poly16x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]);
let b0: poly16x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t {
let a0: poly8x16_t = simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]);
let b0: poly8x16_t = simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t {
let a0: poly16x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
let b0: poly16x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t {
let a0: int32x2_t = simd_shuffle2!(a, b, [0, 2]);
let b0: int32x2_t = simd_shuffle2!(a, b, [1, 3]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t {
let a0: uint32x2_t = simd_shuffle2!(a, b, [0, 2]);
let b0: uint32x2_t = simd_shuffle2!(a, b, [1, 3]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
let a0: float32x2_t = simd_shuffle2!(a, b, [0, 2]);
let b0: float32x2_t = simd_shuffle2!(a, b, [1, 3]);
transmute((a0, b0))
}
/// Unzip vectors
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f32)
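///
/// A sketch that separates interleaved (re, im) pairs into two planar
/// vectors (editorial addition, not run as a doctest):
///
/// ```ignore
/// unsafe {
///     let a = vld1q_f32([1.0f32, -1.0, 2.0, -2.0].as_ptr());
///     let b = vld1q_f32([3.0f32, -3.0, 4.0, -4.0].as_ptr());
///     let r = vuzpq_f32(a, b);
///     // r.0 == [1.0, 2.0, 3.0, 4.0]     (the "re" components)
///     // r.1 == [-1.0, -2.0, -3.0, -4.0] (the "im" components)
/// }
/// ```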
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t {
let a0: float32x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]);
let b0: float32x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]);
transmute((a0, b0))
}
/// Unsigned Absolute Difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u8)
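///
/// Each lane computes `a + |b - c|`, with the absolute difference widened
/// from 8 to 16 bits before the add. A sketch (editorial addition, not run
/// as a doctest):
///
/// ```ignore
/// unsafe {
///     let acc = vdupq_n_u16(100);
///     let r = vabal_u8(acc, vdup_n_u8(5), vdup_n_u8(9));
///     // every lane of r is 100 + |5 - 9| = 104
/// }
/// ```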
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t {
let d: uint8x8_t = vabd_u8(b, c);
simd_add(a, simd_cast(d))
}
/// Unsigned Absolute Difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t {
let d: uint16x4_t = vabd_u16(b, c);
simd_add(a, simd_cast(d))
}
/// Unsigned Absolute Difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t {
let d: uint32x2_t = vabd_u32(b, c);
simd_add(a, simd_cast(d))
}
/// Signed Absolute Difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s8)
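///
/// As in the unsigned case, each lane computes `a + |b - c|`. Note that the
/// absolute difference of two `i8` values can be as large as 255, which the
/// body below accounts for by widening through `u8`. A sketch (editorial
/// addition, not run as a doctest):
///
/// ```ignore
/// unsafe {
///     let acc = vdupq_n_s16(-10);
///     let r = vabal_s8(acc, vdup_n_s8(-100), vdup_n_s8(27));
///     // every lane of r is -10 + |-100 - 27| = 117
/// }
/// ```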
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t {
let d: int8x8_t = vabd_s8(b, c);
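// vabd_s8 yields the difference modulo 2^8; reinterpreting it as u8 and then
// zero-extending (rather than sign-extending) recovers |b - c| even when it
// does not fit in i8, e.g. |-128 - 127| = 255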
let e: uint8x8_t = simd_cast(d);
simd_add(a, simd_cast(e))
}
/// Signed Absolute Difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
let d: int16x4_t = vabd_s16(b, c);
let e: uint16x4_t = simd_cast(d);
simd_add(a, simd_cast(e))
}
/// Signed Absolute Difference and Accumulate Long
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabal))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
let d: int32x2_t = vabd_s32(b, c);
let e: uint32x2_t = simd_cast(d);
simd_add(a, simd_cast(e))
}
/// Signed saturating Absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s8)
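///
/// Unlike a plain absolute value, the result saturates instead of wrapping
/// at the type boundary. A sketch (editorial addition, not run as a
/// doctest):
///
/// ```ignore
/// unsafe {
///     let r = vqabs_s8(vdup_n_s8(i8::MIN));
///     // every lane is i8::MAX (127): |-128| saturates instead of wrapping back to -128
/// }
/// ```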
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.v8i8")]
fn vqabs_s8_(a: int8x8_t) -> int8x8_t;
}
vqabs_s8_(a)
}
/// Signed saturating Absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s8)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v16i8")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.v16i8")]
fn vqabsq_s8_(a: int8x16_t) -> int8x16_t;
}
vqabsq_s8_(a)
}
/// Signed saturating Absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.v4i16")]
fn vqabs_s16_(a: int16x4_t) -> int16x4_t;
}
vqabs_s16_(a)
}
/// Signed saturating Absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s16)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v8i16")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.v8i16")]
fn vqabsq_s16_(a: int16x8_t) -> int16x8_t;
}
vqabsq_s16_(a)
}
/// Signed saturating Absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v2i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.v2i32")]
fn vqabs_s32_(a: int32x2_t) -> int32x2_t;
}
vqabs_s32_(a)
}
/// Signed saturating Absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))]
#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t {
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqabs.v4i32")]
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqabs.v4i32")]
fn vqabsq_s32_(a: int32x4_t) -> int32x4_t;
}
vqabsq_s32_(a)
}
#[cfg(test)]
#[allow(overflowing_literals)]
mod test {
use super::*;
use crate::core_arch::simd::*;
use std::mem::transmute;
use stdarch_test::simd_test;
#[simd_test(enable = "neon")]
unsafe fn test_vand_s8() {
let a: i8x8 = i8x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: i8x8 = i8x8::new(0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F);
let e: i8x8 = i8x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let r: i8x8 = transmute(vand_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: i8x8 = i8x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: i8x8 = i8x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: i8x8 = i8x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let r: i8x8 = transmute(vand_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vandq_s8() {
let a: i8x16 = i8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x00);
let b: i8x16 = i8x16::new(0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F);
let e: i8x16 = i8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x00);
let r: i8x16 = transmute(vandq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: i8x16 = i8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x00);
let b: i8x16 = i8x16::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: i8x16 = i8x16::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let r: i8x16 = transmute(vandq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vand_s16() {
let a: i16x4 = i16x4::new(0x00, 0x01, 0x02, 0x03);
let b: i16x4 = i16x4::new(0x0F, 0x0F, 0x0F, 0x0F);
let e: i16x4 = i16x4::new(0x00, 0x01, 0x02, 0x03);
let r: i16x4 = transmute(vand_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: i16x4 = i16x4::new(0x00, 0x01, 0x02, 0x03);
let b: i16x4 = i16x4::new(0x00, 0x00, 0x00, 0x00);
let e: i16x4 = i16x4::new(0x00, 0x00, 0x00, 0x00);
let r: i16x4 = transmute(vand_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vandq_s16() {
let a: i16x8 = i16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: i16x8 = i16x8::new(0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F);
let e: i16x8 = i16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let r: i16x8 = transmute(vandq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: i16x8 = i16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: i16x8 = i16x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: i16x8 = i16x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let r: i16x8 = transmute(vandq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vand_s32() {
let a: i32x2 = i32x2::new(0x00, 0x01);
let b: i32x2 = i32x2::new(0x0F, 0x0F);
let e: i32x2 = i32x2::new(0x00, 0x01);
let r: i32x2 = transmute(vand_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: i32x2 = i32x2::new(0x00, 0x01);
let b: i32x2 = i32x2::new(0x00, 0x00);
let e: i32x2 = i32x2::new(0x00, 0x00);
let r: i32x2 = transmute(vand_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vandq_s32() {
let a: i32x4 = i32x4::new(0x00, 0x01, 0x02, 0x03);
let b: i32x4 = i32x4::new(0x0F, 0x0F, 0x0F, 0x0F);
let e: i32x4 = i32x4::new(0x00, 0x01, 0x02, 0x03);
let r: i32x4 = transmute(vandq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: i32x4 = i32x4::new(0x00, 0x01, 0x02, 0x03);
let b: i32x4 = i32x4::new(0x00, 0x00, 0x00, 0x00);
let e: i32x4 = i32x4::new(0x00, 0x00, 0x00, 0x00);
let r: i32x4 = transmute(vandq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vand_u8() {
let a: u8x8 = u8x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: u8x8 = u8x8::new(0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F);
let e: u8x8 = u8x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let r: u8x8 = transmute(vand_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: u8x8 = u8x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: u8x8 = u8x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: u8x8 = u8x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let r: u8x8 = transmute(vand_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vandq_u8() {
let a: u8x16 = u8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x00);
let b: u8x16 = u8x16::new(0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F);
let e: u8x16 = u8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x00);
let r: u8x16 = transmute(vandq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: u8x16 = u8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x00);
let b: u8x16 = u8x16::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: u8x16 = u8x16::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let r: u8x16 = transmute(vandq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vand_u16() {
let a: u16x4 = u16x4::new(0x00, 0x01, 0x02, 0x03);
let b: u16x4 = u16x4::new(0x0F, 0x0F, 0x0F, 0x0F);
let e: u16x4 = u16x4::new(0x00, 0x01, 0x02, 0x03);
let r: u16x4 = transmute(vand_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: u16x4 = u16x4::new(0x00, 0x01, 0x02, 0x03);
let b: u16x4 = u16x4::new(0x00, 0x00, 0x00, 0x00);
let e: u16x4 = u16x4::new(0x00, 0x00, 0x00, 0x00);
let r: u16x4 = transmute(vand_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vandq_u16() {
let a: u16x8 = u16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: u16x8 = u16x8::new(0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F);
let e: u16x8 = u16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let r: u16x8 = transmute(vandq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: u16x8 = u16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: u16x8 = u16x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: u16x8 = u16x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let r: u16x8 = transmute(vandq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vand_u32() {
let a: u32x2 = u32x2::new(0x00, 0x01);
let b: u32x2 = u32x2::new(0x0F, 0x0F);
let e: u32x2 = u32x2::new(0x00, 0x01);
let r: u32x2 = transmute(vand_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: u32x2 = u32x2::new(0x00, 0x01);
let b: u32x2 = u32x2::new(0x00, 0x00);
let e: u32x2 = u32x2::new(0x00, 0x00);
let r: u32x2 = transmute(vand_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vandq_u32() {
let a: u32x4 = u32x4::new(0x00, 0x01, 0x02, 0x03);
let b: u32x4 = u32x4::new(0x0F, 0x0F, 0x0F, 0x0F);
let e: u32x4 = u32x4::new(0x00, 0x01, 0x02, 0x03);
let r: u32x4 = transmute(vandq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: u32x4 = u32x4::new(0x00, 0x01, 0x02, 0x03);
let b: u32x4 = u32x4::new(0x00, 0x00, 0x00, 0x00);
let e: u32x4 = u32x4::new(0x00, 0x00, 0x00, 0x00);
let r: u32x4 = transmute(vandq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vand_s64() {
let a: i64x1 = i64x1::new(0x00);
let b: i64x1 = i64x1::new(0x0F);
let e: i64x1 = i64x1::new(0x00);
let r: i64x1 = transmute(vand_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: i64x1 = i64x1::new(0x00);
let b: i64x1 = i64x1::new(0x00);
let e: i64x1 = i64x1::new(0x00);
let r: i64x1 = transmute(vand_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vandq_s64() {
let a: i64x2 = i64x2::new(0x00, 0x01);
let b: i64x2 = i64x2::new(0x0F, 0x0F);
let e: i64x2 = i64x2::new(0x00, 0x01);
let r: i64x2 = transmute(vandq_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: i64x2 = i64x2::new(0x00, 0x01);
let b: i64x2 = i64x2::new(0x00, 0x00);
let e: i64x2 = i64x2::new(0x00, 0x00);
let r: i64x2 = transmute(vandq_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vand_u64() {
let a: u64x1 = u64x1::new(0x00);
let b: u64x1 = u64x1::new(0x0F);
let e: u64x1 = u64x1::new(0x00);
let r: u64x1 = transmute(vand_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: u64x1 = u64x1::new(0x00);
let b: u64x1 = u64x1::new(0x00);
let e: u64x1 = u64x1::new(0x00);
let r: u64x1 = transmute(vand_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vandq_u64() {
let a: u64x2 = u64x2::new(0x00, 0x01);
let b: u64x2 = u64x2::new(0x0F, 0x0F);
let e: u64x2 = u64x2::new(0x00, 0x01);
let r: u64x2 = transmute(vandq_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: u64x2 = u64x2::new(0x00, 0x01);
let b: u64x2 = u64x2::new(0x00, 0x00);
let e: u64x2 = u64x2::new(0x00, 0x00);
let r: u64x2 = transmute(vandq_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vorr_s8() {
let a: i8x8 = i8x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: i8x8 = i8x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: i8x8 = i8x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let r: i8x8 = transmute(vorr_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vorrq_s8() {
let a: i8x16 = i8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
let b: i8x16 = i8x16::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: i8x16 = i8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
let r: i8x16 = transmute(vorrq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vorr_s16() {
let a: i16x4 = i16x4::new(0x00, 0x01, 0x02, 0x03);
let b: i16x4 = i16x4::new(0x00, 0x00, 0x00, 0x00);
let e: i16x4 = i16x4::new(0x00, 0x01, 0x02, 0x03);
let r: i16x4 = transmute(vorr_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vorrq_s16() {
let a: i16x8 = i16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: i16x8 = i16x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: i16x8 = i16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let r: i16x8 = transmute(vorrq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vorr_s32() {
let a: i32x2 = i32x2::new(0x00, 0x01);
let b: i32x2 = i32x2::new(0x00, 0x00);
let e: i32x2 = i32x2::new(0x00, 0x01);
let r: i32x2 = transmute(vorr_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vorrq_s32() {
let a: i32x4 = i32x4::new(0x00, 0x01, 0x02, 0x03);
let b: i32x4 = i32x4::new(0x00, 0x00, 0x00, 0x00);
let e: i32x4 = i32x4::new(0x00, 0x01, 0x02, 0x03);
let r: i32x4 = transmute(vorrq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vorr_u8() {
let a: u8x8 = u8x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: u8x8 = u8x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: u8x8 = u8x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let r: u8x8 = transmute(vorr_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vorrq_u8() {
let a: u8x16 = u8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
let b: u8x16 = u8x16::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: u8x16 = u8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
let r: u8x16 = transmute(vorrq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vorr_u16() {
let a: u16x4 = u16x4::new(0x00, 0x01, 0x02, 0x03);
let b: u16x4 = u16x4::new(0x00, 0x00, 0x00, 0x00);
let e: u16x4 = u16x4::new(0x00, 0x01, 0x02, 0x03);
let r: u16x4 = transmute(vorr_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vorrq_u16() {
let a: u16x8 = u16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: u16x8 = u16x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: u16x8 = u16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let r: u16x8 = transmute(vorrq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vorr_u32() {
let a: u32x2 = u32x2::new(0x00, 0x01);
let b: u32x2 = u32x2::new(0x00, 0x00);
let e: u32x2 = u32x2::new(0x00, 0x01);
let r: u32x2 = transmute(vorr_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vorrq_u32() {
let a: u32x4 = u32x4::new(0x00, 0x01, 0x02, 0x03);
let b: u32x4 = u32x4::new(0x00, 0x00, 0x00, 0x00);
let e: u32x4 = u32x4::new(0x00, 0x01, 0x02, 0x03);
let r: u32x4 = transmute(vorrq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vorr_s64() {
let a: i64x1 = i64x1::new(0x00);
let b: i64x1 = i64x1::new(0x00);
let e: i64x1 = i64x1::new(0x00);
let r: i64x1 = transmute(vorr_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vorrq_s64() {
let a: i64x2 = i64x2::new(0x00, 0x01);
let b: i64x2 = i64x2::new(0x00, 0x00);
let e: i64x2 = i64x2::new(0x00, 0x01);
let r: i64x2 = transmute(vorrq_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vorr_u64() {
let a: u64x1 = u64x1::new(0x00);
let b: u64x1 = u64x1::new(0x00);
let e: u64x1 = u64x1::new(0x00);
let r: u64x1 = transmute(vorr_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vorrq_u64() {
let a: u64x2 = u64x2::new(0x00, 0x01);
let b: u64x2 = u64x2::new(0x00, 0x00);
let e: u64x2 = u64x2::new(0x00, 0x01);
let r: u64x2 = transmute(vorrq_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_veor_s8() {
let a: i8x8 = i8x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: i8x8 = i8x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: i8x8 = i8x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let r: i8x8 = transmute(veor_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_veorq_s8() {
let a: i8x16 = i8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
let b: i8x16 = i8x16::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: i8x16 = i8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
let r: i8x16 = transmute(veorq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_veor_s16() {
let a: i16x4 = i16x4::new(0x00, 0x01, 0x02, 0x03);
let b: i16x4 = i16x4::new(0x00, 0x00, 0x00, 0x00);
let e: i16x4 = i16x4::new(0x00, 0x01, 0x02, 0x03);
let r: i16x4 = transmute(veor_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_veorq_s16() {
let a: i16x8 = i16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: i16x8 = i16x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: i16x8 = i16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let r: i16x8 = transmute(veorq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_veor_s32() {
let a: i32x2 = i32x2::new(0x00, 0x01);
let b: i32x2 = i32x2::new(0x00, 0x00);
let e: i32x2 = i32x2::new(0x00, 0x01);
let r: i32x2 = transmute(veor_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_veorq_s32() {
let a: i32x4 = i32x4::new(0x00, 0x01, 0x02, 0x03);
let b: i32x4 = i32x4::new(0x00, 0x00, 0x00, 0x00);
let e: i32x4 = i32x4::new(0x00, 0x01, 0x02, 0x03);
let r: i32x4 = transmute(veorq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_veor_u8() {
let a: u8x8 = u8x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: u8x8 = u8x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: u8x8 = u8x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let r: u8x8 = transmute(veor_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_veorq_u8() {
let a: u8x16 = u8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
let b: u8x16 = u8x16::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: u8x16 = u8x16::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
let r: u8x16 = transmute(veorq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_veor_u16() {
let a: u16x4 = u16x4::new(0x00, 0x01, 0x02, 0x03);
let b: u16x4 = u16x4::new(0x00, 0x00, 0x00, 0x00);
let e: u16x4 = u16x4::new(0x00, 0x01, 0x02, 0x03);
let r: u16x4 = transmute(veor_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_veorq_u16() {
let a: u16x8 = u16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: u16x8 = u16x8::new(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: u16x8 = u16x8::new(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let r: u16x8 = transmute(veorq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_veor_u32() {
let a: u32x2 = u32x2::new(0x00, 0x01);
let b: u32x2 = u32x2::new(0x00, 0x00);
let e: u32x2 = u32x2::new(0x00, 0x01);
let r: u32x2 = transmute(veor_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_veorq_u32() {
let a: u32x4 = u32x4::new(0x00, 0x01, 0x02, 0x03);
let b: u32x4 = u32x4::new(0x00, 0x00, 0x00, 0x00);
let e: u32x4 = u32x4::new(0x00, 0x01, 0x02, 0x03);
let r: u32x4 = transmute(veorq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_veor_s64() {
let a: i64x1 = i64x1::new(0x00);
let b: i64x1 = i64x1::new(0x00);
let e: i64x1 = i64x1::new(0x00);
let r: i64x1 = transmute(veor_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_veorq_s64() {
let a: i64x2 = i64x2::new(0x00, 0x01);
let b: i64x2 = i64x2::new(0x00, 0x00);
let e: i64x2 = i64x2::new(0x00, 0x01);
let r: i64x2 = transmute(veorq_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_veor_u64() {
let a: u64x1 = u64x1::new(0x00);
let b: u64x1 = u64x1::new(0x00);
let e: u64x1 = u64x1::new(0x00);
let r: u64x1 = transmute(veor_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_veorq_u64() {
let a: u64x2 = u64x2::new(0x00, 0x01);
let b: u64x2 = u64x2::new(0x00, 0x00);
let e: u64x2 = u64x2::new(0x00, 0x01);
let r: u64x2 = transmute(veorq_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
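// vabd_* / vabdq_* tests: element-wise absolute difference |a - b|, with the
// result in the same lane type as the inputs.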
#[simd_test(enable = "neon")]
unsafe fn test_vabd_s8() {
let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i8x8 = i8x8::new(16, 15, 14, 13, 12, 11, 10, 9);
let e: i8x8 = i8x8::new(15, 13, 11, 9, 7, 5, 3, 1);
let r: i8x8 = transmute(vabd_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabdq_s8() {
let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: i8x16 = i8x16::new(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1);
let e: i8x16 = i8x16::new(15, 13, 11, 9, 7, 5, 3, 1, 1, 3, 5, 7, 9, 11, 13, 15);
let r: i8x16 = transmute(vabdq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabd_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(16, 15, 14, 13);
let e: i16x4 = i16x4::new(15, 13, 11, 9);
let r: i16x4 = transmute(vabd_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabdq_s16() {
let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i16x8 = i16x8::new(16, 15, 14, 13, 12, 11, 10, 9);
let e: i16x8 = i16x8::new(15, 13, 11, 9, 7, 5, 3, 1);
let r: i16x8 = transmute(vabdq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabd_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x2 = i32x2::new(16, 15);
let e: i32x2 = i32x2::new(15, 13);
let r: i32x2 = transmute(vabd_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabdq_s32() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let b: i32x4 = i32x4::new(16, 15, 14, 13);
let e: i32x4 = i32x4::new(15, 13, 11, 9);
let r: i32x4 = transmute(vabdq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabd_u8() {
let a: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u8x8 = u8x8::new(16, 15, 14, 13, 12, 11, 10, 9);
let e: u8x8 = u8x8::new(15, 13, 11, 9, 7, 5, 3, 1);
let r: u8x8 = transmute(vabd_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabdq_u8() {
let a: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: u8x16 = u8x16::new(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1);
let e: u8x16 = u8x16::new(15, 13, 11, 9, 7, 5, 3, 1, 1, 3, 5, 7, 9, 11, 13, 15);
let r: u8x16 = transmute(vabdq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabd_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: u16x4 = u16x4::new(16, 15, 14, 13);
let e: u16x4 = u16x4::new(15, 13, 11, 9);
let r: u16x4 = transmute(vabd_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabdq_u16() {
let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u16x8 = u16x8::new(16, 15, 14, 13, 12, 11, 10, 9);
let e: u16x8 = u16x8::new(15, 13, 11, 9, 7, 5, 3, 1);
let r: u16x8 = transmute(vabdq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabd_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32x2 = u32x2::new(16, 15);
let e: u32x2 = u32x2::new(15, 13);
let r: u32x2 = transmute(vabd_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabdq_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: u32x4 = u32x4::new(16, 15, 14, 13);
let e: u32x4 = u32x4::new(15, 13, 11, 9);
let r: u32x4 = transmute(vabdq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabd_f32() {
let a: f32x2 = f32x2::new(1.0, 2.0);
let b: f32x2 = f32x2::new(9.0, 3.0);
let e: f32x2 = f32x2::new(8.0, 1.0);
let r: f32x2 = transmute(vabd_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabdq_f32() {
let a: f32x4 = f32x4::new(1.0, 2.0, 5.0, -4.0);
let b: f32x4 = f32x4::new(9.0, 3.0, 2.0, 8.0);
let e: f32x4 = f32x4::new(8.0, 1.0, 3.0, 12.0);
let r: f32x4 = transmute(vabdq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
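// vabdl_* tests: widening absolute difference; each |a - b| lane is produced
// at twice the input width (e.g. u8x8 inputs give a u16x8 result).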
#[simd_test(enable = "neon")]
unsafe fn test_vabdl_u8() {
let a: u8x8 = u8x8::new(1, 2, 3, 4, 4, 3, 2, 1);
let b: u8x8 = u8x8::new(10, 10, 10, 10, 10, 10, 10, 10);
let e: u16x8 = u16x8::new(9, 8, 7, 6, 6, 7, 8, 9);
let r: u16x8 = transmute(vabdl_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabdl_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: u16x4 = u16x4::new(10, 10, 10, 10);
let e: u32x4 = u32x4::new(9, 8, 7, 6);
let r: u32x4 = transmute(vabdl_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabdl_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32x2 = u32x2::new(10, 10);
let e: u64x2 = u64x2::new(9, 8);
let r: u64x2 = transmute(vabdl_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabdl_s8() {
let a: i8x8 = i8x8::new(1, 2, 3, 4, 4, 3, 2, 1);
let b: i8x8 = i8x8::new(10, 10, 10, 10, 10, 10, 10, 10);
let e: i16x8 = i16x8::new(9, 8, 7, 6, 6, 7, 8, 9);
let r: i16x8 = transmute(vabdl_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabdl_s16() {
let a: i16x4 = i16x4::new(1, 2, 11, 12);
let b: i16x4 = i16x4::new(10, 10, 10, 10);
let e: i32x4 = i32x4::new(9, 8, 1, 2);
let r: i32x4 = transmute(vabdl_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabdl_s32() {
let a: i32x2 = i32x2::new(1, 11);
let b: i32x2 = i32x2::new(10, 10);
let e: i64x2 = i64x2::new(9, 1);
let r: i64x2 = transmute(vabdl_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
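// vceq_* / vceqq_* tests: lane-wise equality compare. A result lane is all
// ones ("true") when the corresponding input lanes are equal and all zeros
// otherwise; the second case in each integer test mixes equal and unequal
// lanes. The f32 tests compare bit-identical constants, so exact
// expectations are safe.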
#[simd_test(enable = "neon")]
unsafe fn test_vceq_u8() {
let a: u8x8 = u8x8::new(0, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: u8x8 = u8x8::new(0, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let e: u8x8 = u8x8::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x8 = transmute(vceq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: u8x8 = u8x8::new(0, 0, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: u8x8 = u8x8::new(0, 0xFF, 0x02, 0x04, 0x04, 0x00, 0x06, 0x08);
let e: u8x8 = u8x8::new(0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0);
let r: u8x8 = transmute(vceq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vceqq_u8() {
let a: u8x16 = u8x16::new(0, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0xFF);
let b: u8x16 = u8x16::new(0, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0xFF);
let e: u8x16 = u8x16::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x16 = transmute(vceqq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: u8x16 = u8x16::new(0, 0, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0xCC, 0x0D, 0xEE, 0xFF);
let b: u8x16 = u8x16::new(0, 0xFF, 0x02, 0x04, 0x04, 0x00, 0x06, 0x08, 0x08, 0x00, 0x0A, 0x0A, 0xCC, 0xD0, 0xEE, 0);
let e: u8x16 = u8x16::new(0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0);
let r: u8x16 = transmute(vceqq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vceq_u16() {
let a: u16x4 = u16x4::new(0, 0x01, 0x02, 0x03);
let b: u16x4 = u16x4::new(0, 0x01, 0x02, 0x03);
let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x4 = transmute(vceq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: u16x4 = u16x4::new(0, 0, 0x02, 0x03);
let b: u16x4 = u16x4::new(0, 0xFF_FF, 0x02, 0x04);
let e: u16x4 = u16x4::new(0xFF_FF, 0, 0xFF_FF, 0);
let r: u16x4 = transmute(vceq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vceqq_u16() {
let a: u16x8 = u16x8::new(0, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: u16x8 = u16x8::new(0, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x8 = transmute(vceqq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: u16x8 = u16x8::new(0, 0, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: u16x8 = u16x8::new(0, 0xFF_FF, 0x02, 0x04, 0x04, 0x00, 0x06, 0x08);
let e: u16x8 = u16x8::new(0xFF_FF, 0, 0xFF_FF, 0, 0xFF_FF, 0, 0xFF_FF, 0);
let r: u16x8 = transmute(vceqq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vceq_u32() {
let a: u32x2 = u32x2::new(0, 0x01);
let b: u32x2 = u32x2::new(0, 0x01);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vceq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: u32x2 = u32x2::new(0, 0);
let b: u32x2 = u32x2::new(0, 0xFF_FF_FF_FF);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0);
let r: u32x2 = transmute(vceq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vceqq_u32() {
let a: u32x4 = u32x4::new(0, 0x01, 0x02, 0x03);
let b: u32x4 = u32x4::new(0, 0x01, 0x02, 0x03);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vceqq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: u32x4 = u32x4::new(0, 0, 0x02, 0x03);
let b: u32x4 = u32x4::new(0, 0xFF_FF_FF_FF, 0x02, 0x04);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0, 0xFF_FF_FF_FF, 0);
let r: u32x4 = transmute(vceqq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vceq_s8() {
let a: i8x8 = i8x8::new(-128, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: i8x8 = i8x8::new(-128, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let e: u8x8 = u8x8::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x8 = transmute(vceq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: i8x8 = i8x8::new(-128, -128, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: i8x8 = i8x8::new(-128, 0x7F, 0x02, 0x04, 0x04, 0x00, 0x06, 0x08);
let e: u8x8 = u8x8::new(0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0);
let r: u8x8 = transmute(vceq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vceqq_s8() {
let a: i8x16 = i8x16::new(-128, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x7F);
let b: i8x16 = i8x16::new(-128, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x7F);
let e: u8x16 = u8x16::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x16 = transmute(vceqq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: i8x16 = i8x16::new(-128, -128, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0xCC, 0x0D, 0xEE, 0x7F);
let b: i8x16 = i8x16::new(-128, 0x7F, 0x02, 0x04, 0x04, 0x00, 0x06, 0x08, 0x08, 0x00, 0x0A, 0x0A, 0xCC, 0xD0, 0xEE, -128);
let e: u8x16 = u8x16::new(0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0);
let r: u8x16 = transmute(vceqq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vceq_s16() {
let a: i16x4 = i16x4::new(-32768, 0x01, 0x02, 0x03);
let b: i16x4 = i16x4::new(-32768, 0x01, 0x02, 0x03);
let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x4 = transmute(vceq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: i16x4 = i16x4::new(-32768, -32768, 0x02, 0x03);
let b: i16x4 = i16x4::new(-32768, 0x7F_FF, 0x02, 0x04);
let e: u16x4 = u16x4::new(0xFF_FF, 0, 0xFF_FF, 0);
let r: u16x4 = transmute(vceq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vceqq_s16() {
let a: i16x8 = i16x8::new(-32768, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: i16x8 = i16x8::new(-32768, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x8 = transmute(vceqq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: i16x8 = i16x8::new(-32768, -32768, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: i16x8 = i16x8::new(-32768, 0x7F_FF, 0x02, 0x04, 0x04, 0x00, 0x06, 0x08);
let e: u16x8 = u16x8::new(0xFF_FF, 0, 0xFF_FF, 0, 0xFF_FF, 0, 0xFF_FF, 0);
let r: u16x8 = transmute(vceqq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vceq_s32() {
let a: i32x2 = i32x2::new(-2147483648, 0x01);
let b: i32x2 = i32x2::new(-2147483648, 0x01);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vceq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: i32x2 = i32x2::new(-2147483648, -2147483648);
let b: i32x2 = i32x2::new(-2147483648, 0x7F_FF_FF_FF);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0);
let r: u32x2 = transmute(vceq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vceqq_s32() {
let a: i32x4 = i32x4::new(-2147483648, 0x01, 0x02, 0x03);
let b: i32x4 = i32x4::new(-2147483648, 0x01, 0x02, 0x03);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vceqq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: i32x4 = i32x4::new(-2147483648, -2147483648, 0x02, 0x03);
let b: i32x4 = i32x4::new(-2147483648, 0x7F_FF_FF_FF, 0x02, 0x04);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0, 0xFF_FF_FF_FF, 0);
let r: u32x4 = transmute(vceqq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vceq_p8() {
let a: i8x8 = i8x8::new(-128, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: i8x8 = i8x8::new(-128, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let e: u8x8 = u8x8::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x8 = transmute(vceq_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: i8x8 = i8x8::new(-128, -128, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
let b: i8x8 = i8x8::new(-128, 0x7F, 0x02, 0x04, 0x04, 0x00, 0x06, 0x08);
let e: u8x8 = u8x8::new(0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0);
let r: u8x8 = transmute(vceq_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vceqq_p8() {
let a: i8x16 = i8x16::new(-128, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x7F);
let b: i8x16 = i8x16::new(-128, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x7F);
let e: u8x16 = u8x16::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x16 = transmute(vceqq_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
let a: i8x16 = i8x16::new(-128, -128, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0xCC, 0x0D, 0xEE, 0x7F);
let b: i8x16 = i8x16::new(-128, 0x7F, 0x02, 0x04, 0x04, 0x00, 0x06, 0x08, 0x08, 0x00, 0x0A, 0x0A, 0xCC, 0xD0, 0xEE, -128);
let e: u8x16 = u8x16::new(0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0);
let r: u8x16 = transmute(vceqq_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vceq_f32() {
let a: f32x2 = f32x2::new(1.2, 3.4);
let b: f32x2 = f32x2::new(1.2, 3.4);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vceq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vceqq_f32() {
let a: f32x4 = f32x4::new(1.2, 3.4, 5.6, 7.8);
let b: f32x4 = f32x4::new(1.2, 3.4, 5.6, 7.8);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vceqq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
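// vtst_* / vtstq_* tests: lane-wise bit test. A result lane is all ones when
// (a & b) != 0 for that lane, so a zero lane tested against itself is false.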
#[simd_test(enable = "neon")]
unsafe fn test_vtst_s8() {
let a: i8x8 = i8x8::new(-128, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
let b: i8x8 = i8x8::new(-128, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
let e: u8x8 = u8x8::new(0xFF, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x8 = transmute(vtst_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtstq_s8() {
let a: i8x16 = i8x16::new(-128, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x7F);
let b: i8x16 = i8x16::new(-128, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x7F);
let e: u8x16 = u8x16::new(0xFF, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x16 = transmute(vtstq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtst_s16() {
let a: i16x4 = i16x4::new(-32768, 0x00, 0x01, 0x02);
let b: i16x4 = i16x4::new(-32768, 0x00, 0x01, 0x02);
let e: u16x4 = u16x4::new(0xFF_FF, 0, 0xFF_FF, 0xFF_FF);
let r: u16x4 = transmute(vtst_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtstq_s16() {
let a: i16x8 = i16x8::new(-32768, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
let b: i16x8 = i16x8::new(-32768, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
let e: u16x8 = u16x8::new(0xFF_FF, 0, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x8 = transmute(vtstq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtst_s32() {
let a: i32x2 = i32x2::new(-2147483648, 0x00);
let b: i32x2 = i32x2::new(-2147483648, 0x00);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0);
let r: u32x2 = transmute(vtst_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtstq_s32() {
let a: i32x4 = i32x4::new(-2147483648, 0x00, 0x01, 0x02);
let b: i32x4 = i32x4::new(-2147483648, 0x00, 0x01, 0x02);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vtstq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtst_p8() {
let a: i8x8 = i8x8::new(-128, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
let b: i8x8 = i8x8::new(-128, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
let e: u8x8 = u8x8::new(0xFF, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x8 = transmute(vtst_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtstq_p8() {
let a: i8x16 = i8x16::new(-128, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x7F);
let b: i8x16 = i8x16::new(-128, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x7F);
let e: u8x16 = u8x16::new(0xFF, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x16 = transmute(vtstq_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtst_p16() {
let a: i16x4 = i16x4::new(-32768, 0x00, 0x01, 0x02);
let b: i16x4 = i16x4::new(-32768, 0x00, 0x01, 0x02);
let e: u16x4 = u16x4::new(0xFF_FF, 0, 0xFF_FF, 0xFF_FF);
let r: u16x4 = transmute(vtst_p16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtstq_p16() {
let a: i16x8 = i16x8::new(-32768, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
let b: i16x8 = i16x8::new(-32768, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
let e: u16x8 = u16x8::new(0xFF_FF, 0, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x8 = transmute(vtstq_p16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtst_u8() {
let a: u8x8 = u8x8::new(0, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
let b: u8x8 = u8x8::new(0, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
let e: u8x8 = u8x8::new(0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x8 = transmute(vtst_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtstq_u8() {
let a: u8x16 = u8x16::new(0, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0xFF);
let b: u8x16 = u8x16::new(0, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0xFF);
let e: u8x16 = u8x16::new(0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x16 = transmute(vtstq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtst_u16() {
let a: u16x4 = u16x4::new(0, 0x00, 0x01, 0x02);
let b: u16x4 = u16x4::new(0, 0x00, 0x01, 0x02);
let e: u16x4 = u16x4::new(0, 0, 0xFF_FF, 0xFF_FF);
let r: u16x4 = transmute(vtst_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtstq_u16() {
let a: u16x8 = u16x8::new(0, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
let b: u16x8 = u16x8::new(0, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06);
let e: u16x8 = u16x8::new(0, 0, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x8 = transmute(vtstq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtst_u32() {
let a: u32x2 = u32x2::new(0, 0x00);
let b: u32x2 = u32x2::new(0, 0x00);
let e: u32x2 = u32x2::new(0, 0);
let r: u32x2 = transmute(vtst_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtstq_u32() {
let a: u32x4 = u32x4::new(0, 0x00, 0x01, 0x02);
let b: u32x4 = u32x4::new(0, 0x00, 0x01, 0x02);
let e: u32x4 = u32x4::new(0, 0, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vtstq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
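// vabs_f32 / vabsq_f32 tests: floating-point absolute value. This only
// clears the sign bit, so comparing the result with assert_eq! is exact.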
#[simd_test(enable = "neon")]
unsafe fn test_vabs_f32() {
let a: f32x2 = f32x2::new(-0.1, -2.2);
let e: f32x2 = f32x2::new(0.1, 2.2);
let r: f32x2 = transmute(vabs_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabsq_f32() {
let a: f32x4 = f32x4::new(-0.1, -2.2, -3.3, -6.6);
let e: f32x4 = f32x4::new(0.1, 2.2, 3.3, 6.6);
let r: f32x4 = transmute(vabsq_f32(transmute(a)));
assert_eq!(r, e);
}
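// vcgt_* tests: lane-wise "greater than" compare (all ones for true, zero
// for false), for signed, unsigned and floating-point lanes.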
#[simd_test(enable = "neon")]
unsafe fn test_vcgt_s8() {
let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u8x8 = u8x8::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x8 = transmute(vcgt_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgtq_s8() {
let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let e: u8x16 = u8x16::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x16 = transmute(vcgtq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgt_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(0, 1, 2, 3);
let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x4 = transmute(vcgt_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgtq_s16() {
let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x8 = transmute(vcgtq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgt_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x2 = i32x2::new(0, 1);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vcgt_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgtq_s32() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let b: i32x4 = i32x4::new(0, 1, 2, 3);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vcgtq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgt_u8() {
let a: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u8x8 = u8x8::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x8 = transmute(vcgt_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgtq_u8() {
let a: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let e: u8x16 = u8x16::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x16 = transmute(vcgtq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgt_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: u16x4 = u16x4::new(0, 1, 2, 3);
let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x4 = transmute(vcgt_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgtq_u16() {
let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x8 = transmute(vcgtq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgt_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32x2 = u32x2::new(0, 1);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vcgt_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgtq_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: u32x4 = u32x4::new(0, 1, 2, 3);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vcgtq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgt_f32() {
let a: f32x2 = f32x2::new(1.2, 2.3);
let b: f32x2 = f32x2::new(0.1, 1.2);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vcgt_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgtq_f32() {
let a: f32x4 = f32x4::new(1.2, 2.3, 3.4, 4.5);
let b: f32x4 = f32x4::new(0.1, 1.2, 2.3, 3.4);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vcgtq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
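// vclt_* tests: lane-wise "less than" compare.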
#[simd_test(enable = "neon")]
unsafe fn test_vclt_s8() {
let a: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u8x8 = u8x8::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x8 = transmute(vclt_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcltq_s8() {
let a: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: u8x16 = u8x16::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x16 = transmute(vcltq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclt_s16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let b: i16x4 = i16x4::new(1, 2, 3, 4);
let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x4 = transmute(vclt_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcltq_s16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x8 = transmute(vcltq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclt_s32() {
let a: i32x2 = i32x2::new(0, 1);
let b: i32x2 = i32x2::new(1, 2);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vclt_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcltq_s32() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let b: i32x4 = i32x4::new(1, 2, 3, 4);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vcltq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclt_u8() {
let a: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u8x8 = u8x8::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x8 = transmute(vclt_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcltq_u8() {
let a: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: u8x16 = u8x16::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x16 = transmute(vcltq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclt_u16() {
let a: u16x4 = u16x4::new(0, 1, 2, 3);
let b: u16x4 = u16x4::new(1, 2, 3, 4);
let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x4 = transmute(vclt_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcltq_u16() {
let a: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x8 = transmute(vcltq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclt_u32() {
let a: u32x2 = u32x2::new(0, 1);
let b: u32x2 = u32x2::new(1, 2);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vclt_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcltq_u32() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let b: u32x4 = u32x4::new(1, 2, 3, 4);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vcltq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclt_f32() {
let a: f32x2 = f32x2::new(0.1, 1.2);
let b: f32x2 = f32x2::new(1.2, 2.3);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vclt_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcltq_f32() {
let a: f32x4 = f32x4::new(0.1, 1.2, 2.3, 3.4);
let b: f32x4 = f32x4::new(1.2, 2.3, 3.4, 4.5);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vcltq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
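// vcle_* tests: lane-wise "less than or equal" compare.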
#[simd_test(enable = "neon")]
unsafe fn test_vcle_s8() {
let a: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u8x8 = u8x8::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x8 = transmute(vcle_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcleq_s8() {
let a: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: u8x16 = u8x16::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x16 = transmute(vcleq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcle_s16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let b: i16x4 = i16x4::new(1, 2, 3, 4);
let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x4 = transmute(vcle_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcleq_s16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x8 = transmute(vcleq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcle_s32() {
let a: i32x2 = i32x2::new(0, 1);
let b: i32x2 = i32x2::new(1, 2);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vcle_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcleq_s32() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let b: i32x4 = i32x4::new(1, 2, 3, 4);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vcleq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcle_u8() {
let a: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u8x8 = u8x8::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x8 = transmute(vcle_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcleq_u8() {
let a: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: u8x16 = u8x16::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x16 = transmute(vcleq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcle_u16() {
let a: u16x4 = u16x4::new(0, 1, 2, 3);
let b: u16x4 = u16x4::new(1, 2, 3, 4);
let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x4 = transmute(vcle_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcleq_u16() {
let a: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x8 = transmute(vcleq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcle_u32() {
let a: u32x2 = u32x2::new(0, 1);
let b: u32x2 = u32x2::new(1, 2);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vcle_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcleq_u32() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let b: u32x4 = u32x4::new(1, 2, 3, 4);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vcleq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcle_f32() {
let a: f32x2 = f32x2::new(0.1, 1.2);
let b: f32x2 = f32x2::new(1.2, 2.3);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vcle_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcleq_f32() {
let a: f32x4 = f32x4::new(0.1, 1.2, 2.3, 3.4);
let b: f32x4 = f32x4::new(1.2, 2.3, 3.4, 4.5);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vcleq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
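// vcge_* tests: lane-wise "greater than or equal" compare.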
#[simd_test(enable = "neon")]
unsafe fn test_vcge_s8() {
let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u8x8 = u8x8::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x8 = transmute(vcge_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgeq_s8() {
let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let e: u8x16 = u8x16::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x16 = transmute(vcgeq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcge_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(0, 1, 2, 3);
let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x4 = transmute(vcge_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgeq_s16() {
let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x8 = transmute(vcgeq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcge_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x2 = i32x2::new(0, 1);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vcge_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgeq_s32() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let b: i32x4 = i32x4::new(0, 1, 2, 3);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vcgeq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcge_u8() {
let a: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u8x8 = u8x8::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x8 = transmute(vcge_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgeq_u8() {
let a: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let e: u8x16 = u8x16::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x16 = transmute(vcgeq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcge_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: u16x4 = u16x4::new(0, 1, 2, 3);
let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x4 = transmute(vcge_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgeq_u16() {
let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x8 = transmute(vcgeq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcge_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32x2 = u32x2::new(0, 1);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vcge_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgeq_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: u32x4 = u32x4::new(0, 1, 2, 3);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vcgeq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcge_f32() {
let a: f32x2 = f32x2::new(1.2, 2.3);
let b: f32x2 = f32x2::new(0.1, 1.2);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vcge_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcgeq_f32() {
let a: f32x4 = f32x4::new(1.2, 2.3, 3.4, 4.5);
let b: f32x4 = f32x4::new(0.1, 1.2, 2.3, 3.4);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vcgeq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
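// vcls_* tests: count leading sign bits (CLS), i.e. how many bits directly
// after the sign bit match it. Both -1 and 0 give lane_width - 1; the
// unsigned variants reinterpret their input bits as signed.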
#[simd_test(enable = "neon")]
unsafe fn test_vcls_s8() {
let a: i8x8 = i8x8::new(-128, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: i8x8 = i8x8::new(0, 7, 7, 7, 7, 7, 7, 7);
let r: i8x8 = transmute(vcls_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclsq_s8() {
let a: i8x16 = i8x16::new(-128, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F);
let e: i8x16 = i8x16::new(0, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 0);
let r: i8x16 = transmute(vclsq_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcls_s16() {
let a: i16x4 = i16x4::new(-32768, -1, 0x00, 0x00);
let e: i16x4 = i16x4::new(0, 15, 15, 15);
let r: i16x4 = transmute(vcls_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclsq_s16() {
let a: i16x8 = i16x8::new(-32768, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: i16x8 = i16x8::new(0, 15, 15, 15, 15, 15, 15, 15);
let r: i16x8 = transmute(vclsq_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcls_s32() {
let a: i32x2 = i32x2::new(-2147483648, -1);
let e: i32x2 = i32x2::new(0, 31);
let r: i32x2 = transmute(vcls_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclsq_s32() {
let a: i32x4 = i32x4::new(-2147483648, -1, 0x00, 0x00);
let e: i32x4 = i32x4::new(0, 31, 31, 31);
let r: i32x4 = transmute(vclsq_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcls_u8() {
let a: u8x8 = u8x8::new(0, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: i8x8 = i8x8::new(7, 7, 7, 7, 7, 7, 7, 7);
let r: i8x8 = transmute(vcls_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclsq_u8() {
let a: u8x16 = u8x16::new(0, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF);
let e: i8x16 = i8x16::new(7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7);
let r: i8x16 = transmute(vclsq_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcls_u16() {
let a: u16x4 = u16x4::new(0, 0xFF_FF, 0x00, 0x00);
let e: i16x4 = i16x4::new(15, 15, 15, 15);
let r: i16x4 = transmute(vcls_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclsq_u16() {
let a: u16x8 = u16x8::new(0, 0xFF_FF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
let e: i16x8 = i16x8::new(15, 15, 15, 15, 15, 15, 15, 15);
let r: i16x8 = transmute(vclsq_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcls_u32() {
let a: u32x2 = u32x2::new(0, 0xFF_FF_FF_FF);
let e: i32x2 = i32x2::new(31, 31);
let r: i32x2 = transmute(vcls_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclsq_u32() {
let a: u32x4 = u32x4::new(0, 0xFF_FF_FF_FF, 0x00, 0x00);
let e: i32x4 = i32x4::new(31, 31, 31, 31);
let r: i32x4 = transmute(vclsq_u32(transmute(a)));
assert_eq!(r, e);
}
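// vclz_* tests: count leading zero bits (CLZ); an all-zero lane yields the
// full lane width.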
#[simd_test(enable = "neon")]
unsafe fn test_vclz_s8() {
let a: i8x8 = i8x8::new(-128, -1, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01);
let e: i8x8 = i8x8::new(0, 0, 8, 7, 7, 7, 7, 7);
let r: i8x8 = transmute(vclz_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclzq_s8() {
let a: i8x16 = i8x16::new(-128, -1, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x7F);
let e: i8x16 = i8x16::new(0, 0, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 1);
let r: i8x16 = transmute(vclzq_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclz_s16() {
let a: i16x4 = i16x4::new(-32768, -1, 0x00, 0x01);
let e: i16x4 = i16x4::new(0, 0, 16, 15);
let r: i16x4 = transmute(vclz_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclzq_s16() {
let a: i16x8 = i16x8::new(-32768, -1, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01);
let e: i16x8 = i16x8::new(0, 0, 16, 15, 15, 15, 15, 15);
let r: i16x8 = transmute(vclzq_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclz_s32() {
let a: i32x2 = i32x2::new(-2147483648, -1);
let e: i32x2 = i32x2::new(0, 0);
let r: i32x2 = transmute(vclz_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclzq_s32() {
let a: i32x4 = i32x4::new(-2147483648, -1, 0x00, 0x01);
let e: i32x4 = i32x4::new(0, 0, 32, 31);
let r: i32x4 = transmute(vclzq_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclz_u8() {
let a: u8x8 = u8x8::new(0, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01);
let e: u8x8 = u8x8::new(8, 8, 7, 7, 7, 7, 7, 7);
let r: u8x8 = transmute(vclz_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclzq_u8() {
let a: u8x16 = u8x16::new(0, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0xFF);
let e: u8x16 = u8x16::new(8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 0);
let r: u8x16 = transmute(vclzq_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclz_u16() {
let a: u16x4 = u16x4::new(0, 0x00, 0x01, 0x01);
let e: u16x4 = u16x4::new(16, 16, 15, 15);
let r: u16x4 = transmute(vclz_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclzq_u16() {
let a: u16x8 = u16x8::new(0, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01);
let e: u16x8 = u16x8::new(16, 16, 15, 15, 15, 15, 15, 15);
let r: u16x8 = transmute(vclzq_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclz_u32() {
let a: u32x2 = u32x2::new(0, 0x00);
let e: u32x2 = u32x2::new(32, 32);
let r: u32x2 = transmute(vclz_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vclzq_u32() {
let a: u32x4 = u32x4::new(0, 0x00, 0x01, 0x01);
let e: u32x4 = u32x4::new(32, 32, 31, 31);
let r: u32x4 = transmute(vclzq_u32(transmute(a)));
assert_eq!(r, e);
}
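// vcagt/vcage/vcalt/vcale tests: absolute compares, i.e. |a| > |b|,
// |a| >= |b|, |a| < |b| and |a| <= |b| respectively.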
#[simd_test(enable = "neon")]
unsafe fn test_vcagt_f32() {
let a: f32x2 = f32x2::new(-1.2, 0.0);
let b: f32x2 = f32x2::new(-1.1, 0.0);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0);
let r: u32x2 = transmute(vcagt_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcagtq_f32() {
let a: f32x4 = f32x4::new(-1.2, 0.0, 1.2, 2.3);
let b: f32x4 = f32x4::new(-1.1, 0.0, 1.1, 2.4);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0, 0xFF_FF_FF_FF, 0);
let r: u32x4 = transmute(vcagtq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcage_f32() {
let a: f32x2 = f32x2::new(-1.2, 0.0);
let b: f32x2 = f32x2::new(-1.1, 0.0);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vcage_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcageq_f32() {
let a: f32x4 = f32x4::new(-1.2, 0.0, 1.2, 2.3);
let b: f32x4 = f32x4::new(-1.1, 0.0, 1.1, 2.4);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0);
let r: u32x4 = transmute(vcageq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcalt_f32() {
let a: f32x2 = f32x2::new(-1.2, 0.0);
let b: f32x2 = f32x2::new(-1.1, 0.0);
let e: u32x2 = u32x2::new(0, 0);
let r: u32x2 = transmute(vcalt_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcaltq_f32() {
let a: f32x4 = f32x4::new(-1.2, 0.0, 1.2, 2.3);
let b: f32x4 = f32x4::new(-1.1, 0.0, 1.1, 2.4);
let e: u32x4 = u32x4::new(0, 0, 0, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vcaltq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcale_f32() {
let a: f32x2 = f32x2::new(-1.2, 0.0);
let b: f32x2 = f32x2::new(-1.1, 0.0);
let e: u32x2 = u32x2::new(0, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vcale_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcaleq_f32() {
let a: f32x4 = f32x4::new(-1.2, 0.0, 1.2, 2.3);
let b: f32x4 = f32x4::new(-1.1, 0.0, 1.1, 2.4);
let e: u32x4 = u32x4::new(0, 0xFF_FF_FF_FF, 0, 0xFF_FF_FF_FF);
let r: u32x4 = transmute(vcaleq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
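// vcreate_* tests: reinterpret a u64 bit pattern as a 64-bit vector; the
// least significant bits of the u64 form lane 0, so the value 1 sets only
// the first lane.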
#[simd_test(enable = "neon")]
unsafe fn test_vcreate_s8() {
let a: u64 = 1;
let e: i8x8 = i8x8::new(1, 0, 0, 0, 0, 0, 0, 0);
let r: i8x8 = transmute(vcreate_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcreate_s16() {
let a: u64 = 1;
let e: i16x4 = i16x4::new(1, 0, 0, 0);
let r: i16x4 = transmute(vcreate_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcreate_s32() {
let a: u64 = 1;
let e: i32x2 = i32x2::new(1, 0);
let r: i32x2 = transmute(vcreate_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcreate_s64() {
let a: u64 = 1;
let e: i64x1 = i64x1::new(1);
let r: i64x1 = transmute(vcreate_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcreate_u8() {
let a: u64 = 1;
let e: u8x8 = u8x8::new(1, 0, 0, 0, 0, 0, 0, 0);
let r: u8x8 = transmute(vcreate_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcreate_u16() {
let a: u64 = 1;
let e: u16x4 = u16x4::new(1, 0, 0, 0);
let r: u16x4 = transmute(vcreate_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcreate_u32() {
let a: u64 = 1;
let e: u32x2 = u32x2::new(1, 0);
let r: u32x2 = transmute(vcreate_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcreate_u64() {
let a: u64 = 1;
let e: u64x1 = u64x1::new(1);
let r: u64x1 = transmute(vcreate_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcreate_p8() {
let a: u64 = 1;
let e: i8x8 = i8x8::new(1, 0, 0, 0, 0, 0, 0, 0);
let r: i8x8 = transmute(vcreate_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcreate_p16() {
let a: u64 = 1;
let e: i16x4 = i16x4::new(1, 0, 0, 0);
let r: i16x4 = transmute(vcreate_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcreate_p64() {
let a: u64 = 1;
let e: i64x1 = i64x1::new(1);
let r: i64x1 = transmute(vcreate_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcreate_f32() {
let a: u64 = 0;
let e: f32x2 = f32x2::new(0., 0.);
let r: f32x2 = transmute(vcreate_f32(transmute(a)));
assert_eq!(r, e);
}
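// vcvt_* tests: conversions between integer and floating-point lanes. The
// float-to-int variants round toward zero; the _n_ fixed-point variants
// scale by 2^-N (int to float) or 2^N (float to int) via the const generic.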
#[simd_test(enable = "neon")]
unsafe fn test_vcvt_f32_s32() {
let a: i32x2 = i32x2::new(1, 2);
let e: f32x2 = f32x2::new(1., 2.);
let r: f32x2 = transmute(vcvt_f32_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcvtq_f32_s32() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let e: f32x4 = f32x4::new(1., 2., 3., 4.);
let r: f32x4 = transmute(vcvtq_f32_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcvt_f32_u32() {
let a: u32x2 = u32x2::new(1, 2);
let e: f32x2 = f32x2::new(1., 2.);
let r: f32x2 = transmute(vcvt_f32_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcvtq_f32_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let e: f32x4 = f32x4::new(1., 2., 3., 4.);
let r: f32x4 = transmute(vcvtq_f32_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcvt_n_f32_s32() {
let a: i32x2 = i32x2::new(1, 2);
let e: f32x2 = f32x2::new(0.25, 0.5);
let r: f32x2 = transmute(vcvt_n_f32_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcvtq_n_f32_s32() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let e: f32x4 = f32x4::new(0.25, 0.5, 0.75, 1.);
let r: f32x4 = transmute(vcvtq_n_f32_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcvt_n_f32_u32() {
let a: u32x2 = u32x2::new(1, 2);
let e: f32x2 = f32x2::new(0.25, 0.5);
let r: f32x2 = transmute(vcvt_n_f32_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcvtq_n_f32_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let e: f32x4 = f32x4::new(0.25, 0.5, 0.75, 1.);
let r: f32x4 = transmute(vcvtq_n_f32_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcvt_n_s32_f32() {
let a: f32x2 = f32x2::new(0.25, 0.5);
let e: i32x2 = i32x2::new(1, 2);
let r: i32x2 = transmute(vcvt_n_s32_f32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcvtq_n_s32_f32() {
let a: f32x4 = f32x4::new(0.25, 0.5, 0.75, 1.);
let e: i32x4 = i32x4::new(1, 2, 3, 4);
let r: i32x4 = transmute(vcvtq_n_s32_f32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcvt_n_u32_f32() {
let a: f32x2 = f32x2::new(0.25, 0.5);
let e: u32x2 = u32x2::new(1, 2);
let r: u32x2 = transmute(vcvt_n_u32_f32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcvtq_n_u32_f32() {
let a: f32x4 = f32x4::new(0.25, 0.5, 0.75, 1.);
let e: u32x4 = u32x4::new(1, 2, 3, 4);
let r: u32x4 = transmute(vcvtq_n_u32_f32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcvt_s32_f32() {
let a: f32x2 = f32x2::new(-1.1, 2.1);
let e: i32x2 = i32x2::new(-1, 2);
let r: i32x2 = transmute(vcvt_s32_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcvtq_s32_f32() {
let a: f32x4 = f32x4::new(-1.1, 2.1, -2.9, 3.9);
let e: i32x4 = i32x4::new(-1, 2, -2, 3);
let r: i32x4 = transmute(vcvtq_s32_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcvt_u32_f32() {
let a: f32x2 = f32x2::new(1.1, 2.1);
let e: u32x2 = u32x2::new(1, 2);
let r: u32x2 = transmute(vcvt_u32_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vcvtq_u32_f32() {
let a: f32x4 = f32x4::new(1.1, 2.1, 2.9, 3.9);
let e: u32x4 = u32x4::new(1, 2, 2, 3);
let r: u32x4 = transmute(vcvtq_u32_f32(transmute(a)));
assert_eq!(r, e);
}
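// Note: the `vdup_lane` family broadcasts the lane selected by the const index
// into every lane of the result; a `q` directly after `vdup` marks a 128-bit
// result, and `laneq` marks a 128-bit source.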
#[simd_test(enable = "neon")]
unsafe fn test_vdup_lane_s8() {
let a: i8x8 = i8x8::new(1, 1, 1, 4, 1, 6, 7, 8);
let e: i8x8 = i8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let r: i8x8 = transmute(vdup_lane_s8::<4>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_laneq_s8() {
let a: i8x16 = i8x16::new(1, 1, 1, 4, 1, 6, 7, 8, 1, 10, 11, 12, 13, 14, 15, 16);
let e: i8x16 = i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
let r: i8x16 = transmute(vdupq_laneq_s8::<8>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_lane_s16() {
let a: i16x4 = i16x4::new(1, 1, 1, 4);
let e: i16x4 = i16x4::new(1, 1, 1, 1);
let r: i16x4 = transmute(vdup_lane_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_laneq_s16() {
let a: i16x8 = i16x8::new(1, 1, 1, 4, 1, 6, 7, 8);
let e: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let r: i16x8 = transmute(vdupq_laneq_s16::<4>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_lane_s32() {
let a: i32x2 = i32x2::new(1, 1);
let e: i32x2 = i32x2::new(1, 1);
let r: i32x2 = transmute(vdup_lane_s32::<1>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_laneq_s32() {
let a: i32x4 = i32x4::new(1, 1, 1, 4);
let e: i32x4 = i32x4::new(1, 1, 1, 1);
let r: i32x4 = transmute(vdupq_laneq_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_laneq_s8() {
let a: i8x16 = i8x16::new(1, 1, 1, 4, 1, 6, 7, 8, 1, 10, 11, 12, 13, 14, 15, 16);
let e: i8x8 = i8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let r: i8x8 = transmute(vdup_laneq_s8::<8>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_laneq_s16() {
let a: i16x8 = i16x8::new(1, 1, 1, 4, 1, 6, 7, 8);
let e: i16x4 = i16x4::new(1, 1, 1, 1);
let r: i16x4 = transmute(vdup_laneq_s16::<4>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_laneq_s32() {
let a: i32x4 = i32x4::new(1, 1, 1, 4);
let e: i32x2 = i32x2::new(1, 1);
let r: i32x2 = transmute(vdup_laneq_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_lane_s8() {
let a: i8x8 = i8x8::new(1, 1, 1, 4, 1, 6, 7, 8);
let e: i8x16 = i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
let r: i8x16 = transmute(vdupq_lane_s8::<4>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_lane_s16() {
let a: i16x4 = i16x4::new(1, 1, 1, 4);
let e: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let r: i16x8 = transmute(vdupq_lane_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_lane_s32() {
let a: i32x2 = i32x2::new(1, 1);
let e: i32x4 = i32x4::new(1, 1, 1, 1);
let r: i32x4 = transmute(vdupq_lane_s32::<1>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_lane_u8() {
let a: u8x8 = u8x8::new(1, 1, 1, 4, 1, 6, 7, 8);
let e: u8x8 = u8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let r: u8x8 = transmute(vdup_lane_u8::<4>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_laneq_u8() {
let a: u8x16 = u8x16::new(1, 1, 1, 4, 1, 6, 7, 8, 1, 10, 11, 12, 13, 14, 15, 16);
let e: u8x16 = u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
let r: u8x16 = transmute(vdupq_laneq_u8::<8>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_lane_u16() {
let a: u16x4 = u16x4::new(1, 1, 1, 4);
let e: u16x4 = u16x4::new(1, 1, 1, 1);
let r: u16x4 = transmute(vdup_lane_u16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_laneq_u16() {
let a: u16x8 = u16x8::new(1, 1, 1, 4, 1, 6, 7, 8);
let e: u16x8 = u16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let r: u16x8 = transmute(vdupq_laneq_u16::<4>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_lane_u32() {
let a: u32x2 = u32x2::new(1, 1);
let e: u32x2 = u32x2::new(1, 1);
let r: u32x2 = transmute(vdup_lane_u32::<1>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_laneq_u32() {
let a: u32x4 = u32x4::new(1, 1, 1, 4);
let e: u32x4 = u32x4::new(1, 1, 1, 1);
let r: u32x4 = transmute(vdupq_laneq_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_laneq_u8() {
let a: u8x16 = u8x16::new(1, 1, 1, 4, 1, 6, 7, 8, 1, 10, 11, 12, 13, 14, 15, 16);
let e: u8x8 = u8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let r: u8x8 = transmute(vdup_laneq_u8::<8>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_laneq_u16() {
let a: u16x8 = u16x8::new(1, 1, 1, 4, 1, 6, 7, 8);
let e: u16x4 = u16x4::new(1, 1, 1, 1);
let r: u16x4 = transmute(vdup_laneq_u16::<4>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_laneq_u32() {
let a: u32x4 = u32x4::new(1, 1, 1, 4);
let e: u32x2 = u32x2::new(1, 1);
let r: u32x2 = transmute(vdup_laneq_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_lane_u8() {
let a: u8x8 = u8x8::new(1, 1, 1, 4, 1, 6, 7, 8);
let e: u8x16 = u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
let r: u8x16 = transmute(vdupq_lane_u8::<4>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_lane_u16() {
let a: u16x4 = u16x4::new(1, 1, 1, 4);
let e: u16x8 = u16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let r: u16x8 = transmute(vdupq_lane_u16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_lane_u32() {
let a: u32x2 = u32x2::new(1, 1);
let e: u32x4 = u32x4::new(1, 1, 1, 1);
let r: u32x4 = transmute(vdupq_lane_u32::<1>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_lane_p8() {
let a: i8x8 = i8x8::new(1, 1, 1, 4, 1, 6, 7, 8);
let e: i8x8 = i8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let r: i8x8 = transmute(vdup_lane_p8::<4>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_laneq_p8() {
let a: i8x16 = i8x16::new(1, 1, 1, 4, 1, 6, 7, 8, 1, 10, 11, 12, 13, 14, 15, 16);
let e: i8x16 = i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
let r: i8x16 = transmute(vdupq_laneq_p8::<8>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_lane_p16() {
let a: i16x4 = i16x4::new(1, 1, 1, 4);
let e: i16x4 = i16x4::new(1, 1, 1, 1);
let r: i16x4 = transmute(vdup_lane_p16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_laneq_p16() {
let a: i16x8 = i16x8::new(1, 1, 1, 4, 1, 6, 7, 8);
let e: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let r: i16x8 = transmute(vdupq_laneq_p16::<4>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_laneq_p8() {
let a: i8x16 = i8x16::new(1, 1, 1, 4, 1, 6, 7, 8, 1, 10, 11, 12, 13, 14, 15, 16);
let e: i8x8 = i8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let r: i8x8 = transmute(vdup_laneq_p8::<8>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_laneq_p16() {
let a: i16x8 = i16x8::new(1, 1, 1, 4, 1, 6, 7, 8);
let e: i16x4 = i16x4::new(1, 1, 1, 1);
let r: i16x4 = transmute(vdup_laneq_p16::<4>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_lane_p8() {
let a: i8x8 = i8x8::new(1, 1, 1, 4, 1, 6, 7, 8);
let e: i8x16 = i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
let r: i8x16 = transmute(vdupq_lane_p8::<4>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_lane_p16() {
let a: i16x4 = i16x4::new(1, 1, 1, 4);
let e: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let r: i16x8 = transmute(vdupq_lane_p16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_laneq_s64() {
let a: i64x2 = i64x2::new(1, 1);
let e: i64x2 = i64x2::new(1, 1);
let r: i64x2 = transmute(vdupq_laneq_s64::<1>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_lane_s64() {
let a: i64x1 = i64x1::new(1);
let e: i64x2 = i64x2::new(1, 1);
let r: i64x2 = transmute(vdupq_lane_s64::<0>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_laneq_u64() {
let a: u64x2 = u64x2::new(1, 1);
let e: u64x2 = u64x2::new(1, 1);
let r: u64x2 = transmute(vdupq_laneq_u64::<1>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_lane_u64() {
let a: u64x1 = u64x1::new(1);
let e: u64x2 = u64x2::new(1, 1);
let r: u64x2 = transmute(vdupq_lane_u64::<0>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_lane_f32() {
let a: f32x2 = f32x2::new(1., 1.);
let e: f32x2 = f32x2::new(1., 1.);
let r: f32x2 = transmute(vdup_lane_f32::<1>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_laneq_f32() {
let a: f32x4 = f32x4::new(1., 1., 1., 4.);
let e: f32x4 = f32x4::new(1., 1., 1., 1.);
let r: f32x4 = transmute(vdupq_laneq_f32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_laneq_f32() {
let a: f32x4 = f32x4::new(1., 1., 1., 4.);
let e: f32x2 = f32x2::new(1., 1.);
let r: f32x2 = transmute(vdup_laneq_f32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdupq_lane_f32() {
let a: f32x2 = f32x2::new(1., 1.);
let e: f32x4 = f32x4::new(1., 1., 1., 1.);
let r: f32x4 = transmute(vdupq_lane_f32::<1>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_lane_s64() {
let a: i64x1 = i64x1::new(0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vdup_lane_s64::<0>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_lane_u64() {
let a: u64x1 = u64x1::new(0);
let e: u64x1 = u64x1::new(0);
let r: u64x1 = transmute(vdup_lane_u64::<0>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_laneq_s64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i64x1 = i64x1::new(1);
let r: i64x1 = transmute(vdup_laneq_s64::<1>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vdup_laneq_u64() {
let a: u64x2 = u64x2::new(0, 1);
let e: u64x1 = u64x1::new(1);
let r: u64x1 = transmute(vdup_laneq_u64::<1>(transmute(a)));
assert_eq!(r, e);
}
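// Note: `vext` concatenates `a` and `b` and extracts a window of lanes
// starting at the const index; with index 7 on 8-lane vectors the result is
// the last lane of `a` followed by the first seven lanes of `b`.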
#[simd_test(enable = "neon")]
unsafe fn test_vext_s8() {
let a: i8x8 = i8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: i8x8 = i8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: i8x8 = i8x8::new(1, 2, 2, 2, 2, 2, 2, 2);
let r: i8x8 = transmute(vext_s8::<7>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_s8() {
let a: i8x16 = i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let e: i8x16 = i8x16::new(1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let r: i8x16 = transmute(vextq_s8::<15>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_s16() {
let a: i16x4 = i16x4::new(1, 1, 1, 1);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let e: i16x4 = i16x4::new(1, 2, 2, 2);
let r: i16x4 = transmute(vext_s16::<3>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_s16() {
let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: i16x8 = i16x8::new(1, 2, 2, 2, 2, 2, 2, 2);
let r: i16x8 = transmute(vextq_s16::<7>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_s32() {
let a: i32x2 = i32x2::new(1, 1);
let b: i32x2 = i32x2::new(2, 2);
let e: i32x2 = i32x2::new(1, 2);
let r: i32x2 = transmute(vext_s32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_s32() {
let a: i32x4 = i32x4::new(1, 1, 1, 1);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let e: i32x4 = i32x4::new(1, 2, 2, 2);
let r: i32x4 = transmute(vextq_s32::<3>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_u8() {
let a: u8x8 = u8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: u8x8 = u8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: u8x8 = u8x8::new(1, 2, 2, 2, 2, 2, 2, 2);
let r: u8x8 = transmute(vext_u8::<7>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_u8() {
let a: u8x16 = u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
let b: u8x16 = u8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let e: u8x16 = u8x16::new(1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let r: u8x16 = transmute(vextq_u8::<15>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_u16() {
let a: u16x4 = u16x4::new(1, 1, 1, 1);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let e: u16x4 = u16x4::new(1, 2, 2, 2);
let r: u16x4 = transmute(vext_u16::<3>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_u16() {
let a: u16x8 = u16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: u16x8 = u16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: u16x8 = u16x8::new(1, 2, 2, 2, 2, 2, 2, 2);
let r: u16x8 = transmute(vextq_u16::<7>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_u32() {
let a: u32x2 = u32x2::new(1, 1);
let b: u32x2 = u32x2::new(2, 2);
let e: u32x2 = u32x2::new(1, 2);
let r: u32x2 = transmute(vext_u32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_u32() {
let a: u32x4 = u32x4::new(1, 1, 1, 1);
let b: u32x4 = u32x4::new(2, 2, 2, 2);
let e: u32x4 = u32x4::new(1, 2, 2, 2);
let r: u32x4 = transmute(vextq_u32::<3>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_p8() {
let a: i8x8 = i8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: i8x8 = i8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: i8x8 = i8x8::new(1, 2, 2, 2, 2, 2, 2, 2);
let r: i8x8 = transmute(vext_p8::<7>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_p8() {
let a: i8x16 = i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let e: i8x16 = i8x16::new(1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let r: i8x16 = transmute(vextq_p8::<15>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_p16() {
let a: i16x4 = i16x4::new(1, 1, 1, 1);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let e: i16x4 = i16x4::new(1, 2, 2, 2);
let r: i16x4 = transmute(vext_p16::<3>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_p16() {
let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: i16x8 = i16x8::new(1, 2, 2, 2, 2, 2, 2, 2);
let r: i16x8 = transmute(vextq_p16::<7>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_s64() {
let a: i64x2 = i64x2::new(1, 1);
let b: i64x2 = i64x2::new(2, 2);
let e: i64x2 = i64x2::new(1, 2);
let r: i64x2 = transmute(vextq_s64::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_u64() {
let a: u64x2 = u64x2::new(1, 1);
let b: u64x2 = u64x2::new(2, 2);
let e: u64x2 = u64x2::new(1, 2);
let r: u64x2 = transmute(vextq_u64::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vext_f32() {
let a: f32x2 = f32x2::new(1., 1.);
let b: f32x2 = f32x2::new(2., 2.);
let e: f32x2 = f32x2::new(1., 2.);
let r: f32x2 = transmute(vext_f32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vextq_f32() {
let a: f32x4 = f32x4::new(1., 1., 1., 1.);
let b: f32x4 = f32x4::new(2., 2., 2., 2.);
let e: f32x4 = f32x4::new(1., 2., 2., 2.);
let r: f32x4 = transmute(vextq_f32::<3>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
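// Note: `vmla` is an element-wise multiply-accumulate, `r = a + b * c`. The
// `_n_` variants take a scalar for `c`, and the `_lane_`/`_laneq_` variants
// use a single lane of `c` selected by the const index.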
#[simd_test(enable = "neon")]
unsafe fn test_vmla_s8() {
let a: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: i8x8 = i8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: i8x8 = i8x8::new(3, 3, 3, 3, 3, 3, 3, 3);
let e: i8x8 = i8x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let r: i8x8 = transmute(vmla_s8(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_s8() {
let a: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let c: i8x16 = i8x16::new(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3);
let e: i8x16 = i8x16::new(6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21);
let r: i8x16 = transmute(vmlaq_s8(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_s16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let c: i16x4 = i16x4::new(3, 3, 3, 3);
let e: i16x4 = i16x4::new(6, 7, 8, 9);
let r: i16x4 = transmute(vmla_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_s16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: i16x8 = i16x8::new(3, 3, 3, 3, 3, 3, 3, 3);
let e: i16x8 = i16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let r: i16x8 = transmute(vmlaq_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_s32() {
let a: i32x2 = i32x2::new(0, 1);
let b: i32x2 = i32x2::new(2, 2);
let c: i32x2 = i32x2::new(3, 3);
let e: i32x2 = i32x2::new(6, 7);
let r: i32x2 = transmute(vmla_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_s32() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let c: i32x4 = i32x4::new(3, 3, 3, 3);
let e: i32x4 = i32x4::new(6, 7, 8, 9);
let r: i32x4 = transmute(vmlaq_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_u8() {
let a: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: u8x8 = u8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: u8x8 = u8x8::new(3, 3, 3, 3, 3, 3, 3, 3);
let e: u8x8 = u8x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let r: u8x8 = transmute(vmla_u8(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_u8() {
let a: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let b: u8x16 = u8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let c: u8x16 = u8x16::new(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3);
let e: u8x16 = u8x16::new(6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21);
let r: u8x16 = transmute(vmlaq_u8(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_u16() {
let a: u16x4 = u16x4::new(0, 1, 2, 3);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let c: u16x4 = u16x4::new(3, 3, 3, 3);
let e: u16x4 = u16x4::new(6, 7, 8, 9);
let r: u16x4 = transmute(vmla_u16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_u16() {
let a: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: u16x8 = u16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: u16x8 = u16x8::new(3, 3, 3, 3, 3, 3, 3, 3);
let e: u16x8 = u16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let r: u16x8 = transmute(vmlaq_u16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_u32() {
let a: u32x2 = u32x2::new(0, 1);
let b: u32x2 = u32x2::new(2, 2);
let c: u32x2 = u32x2::new(3, 3);
let e: u32x2 = u32x2::new(6, 7);
let r: u32x2 = transmute(vmla_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_u32() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let b: u32x4 = u32x4::new(2, 2, 2, 2);
let c: u32x4 = u32x4::new(3, 3, 3, 3);
let e: u32x4 = u32x4::new(6, 7, 8, 9);
let r: u32x4 = transmute(vmlaq_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_f32() {
let a: f32x2 = f32x2::new(0., 1.);
let b: f32x2 = f32x2::new(2., 2.);
let c: f32x2 = f32x2::new(3., 3.);
let e: f32x2 = f32x2::new(6., 7.);
let r: f32x2 = transmute(vmla_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_f32() {
let a: f32x4 = f32x4::new(0., 1., 2., 3.);
let b: f32x4 = f32x4::new(2., 2., 2., 2.);
let c: f32x4 = f32x4::new(3., 3., 3., 3.);
let e: f32x4 = f32x4::new(6., 7., 8., 9.);
let r: f32x4 = transmute(vmlaq_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_n_s16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let c: i16 = 3;
let e: i16x4 = i16x4::new(6, 7, 8, 9);
let r: i16x4 = transmute(vmla_n_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_n_s16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: i16 = 3;
let e: i16x8 = i16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let r: i16x8 = transmute(vmlaq_n_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_n_s32() {
let a: i32x2 = i32x2::new(0, 1);
let b: i32x2 = i32x2::new(2, 2);
let c: i32 = 3;
let e: i32x2 = i32x2::new(6, 7);
let r: i32x2 = transmute(vmla_n_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_n_s32() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let c: i32 = 3;
let e: i32x4 = i32x4::new(6, 7, 8, 9);
let r: i32x4 = transmute(vmlaq_n_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_n_u16() {
let a: u16x4 = u16x4::new(0, 1, 2, 3);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let c: u16 = 3;
let e: u16x4 = u16x4::new(6, 7, 8, 9);
let r: u16x4 = transmute(vmla_n_u16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_n_u16() {
let a: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: u16x8 = u16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: u16 = 3;
let e: u16x8 = u16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let r: u16x8 = transmute(vmlaq_n_u16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_n_u32() {
let a: u32x2 = u32x2::new(0, 1);
let b: u32x2 = u32x2::new(2, 2);
let c: u32 = 3;
let e: u32x2 = u32x2::new(6, 7);
let r: u32x2 = transmute(vmla_n_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_n_u32() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let b: u32x4 = u32x4::new(2, 2, 2, 2);
let c: u32 = 3;
let e: u32x4 = u32x4::new(6, 7, 8, 9);
let r: u32x4 = transmute(vmlaq_n_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_n_f32() {
let a: f32x2 = f32x2::new(0., 1.);
let b: f32x2 = f32x2::new(2., 2.);
let c: f32 = 3.;
let e: f32x2 = f32x2::new(6., 7.);
let r: f32x2 = transmute(vmla_n_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_n_f32() {
let a: f32x4 = f32x4::new(0., 1., 2., 3.);
let b: f32x4 = f32x4::new(2., 2., 2., 2.);
let c: f32 = 3.;
let e: f32x4 = f32x4::new(6., 7., 8., 9.);
let r: f32x4 = transmute(vmlaq_n_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_lane_s16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let c: i16x4 = i16x4::new(0, 3, 0, 0);
let e: i16x4 = i16x4::new(6, 7, 8, 9);
let r: i16x4 = transmute(vmla_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_laneq_s16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let c: i16x8 = i16x8::new(0, 3, 0, 0, 0, 0, 0, 0);
let e: i16x4 = i16x4::new(6, 7, 8, 9);
let r: i16x4 = transmute(vmla_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_lane_s16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: i16x4 = i16x4::new(0, 3, 0, 0);
let e: i16x8 = i16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let r: i16x8 = transmute(vmlaq_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_laneq_s16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: i16x8 = i16x8::new(0, 3, 0, 0, 0, 0, 0, 0);
let e: i16x8 = i16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let r: i16x8 = transmute(vmlaq_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_lane_s32() {
let a: i32x2 = i32x2::new(0, 1);
let b: i32x2 = i32x2::new(2, 2);
let c: i32x2 = i32x2::new(0, 3);
let e: i32x2 = i32x2::new(6, 7);
let r: i32x2 = transmute(vmla_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_laneq_s32() {
let a: i32x2 = i32x2::new(0, 1);
let b: i32x2 = i32x2::new(2, 2);
let c: i32x4 = i32x4::new(0, 3, 0, 0);
let e: i32x2 = i32x2::new(6, 7);
let r: i32x2 = transmute(vmla_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_lane_s32() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let c: i32x2 = i32x2::new(0, 3);
let e: i32x4 = i32x4::new(6, 7, 8, 9);
let r: i32x4 = transmute(vmlaq_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_laneq_s32() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let c: i32x4 = i32x4::new(0, 3, 0, 0);
let e: i32x4 = i32x4::new(6, 7, 8, 9);
let r: i32x4 = transmute(vmlaq_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_lane_u16() {
let a: u16x4 = u16x4::new(0, 1, 2, 3);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let c: u16x4 = u16x4::new(0, 3, 0, 0);
let e: u16x4 = u16x4::new(6, 7, 8, 9);
let r: u16x4 = transmute(vmla_lane_u16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_laneq_u16() {
let a: u16x4 = u16x4::new(0, 1, 2, 3);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let c: u16x8 = u16x8::new(0, 3, 0, 0, 0, 0, 0, 0);
let e: u16x4 = u16x4::new(6, 7, 8, 9);
let r: u16x4 = transmute(vmla_laneq_u16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_lane_u16() {
let a: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: u16x8 = u16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: u16x4 = u16x4::new(0, 3, 0, 0);
let e: u16x8 = u16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let r: u16x8 = transmute(vmlaq_lane_u16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_laneq_u16() {
let a: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: u16x8 = u16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: u16x8 = u16x8::new(0, 3, 0, 0, 0, 0, 0, 0);
let e: u16x8 = u16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let r: u16x8 = transmute(vmlaq_laneq_u16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_lane_u32() {
let a: u32x2 = u32x2::new(0, 1);
let b: u32x2 = u32x2::new(2, 2);
let c: u32x2 = u32x2::new(0, 3);
let e: u32x2 = u32x2::new(6, 7);
let r: u32x2 = transmute(vmla_lane_u32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_laneq_u32() {
let a: u32x2 = u32x2::new(0, 1);
let b: u32x2 = u32x2::new(2, 2);
let c: u32x4 = u32x4::new(0, 3, 0, 0);
let e: u32x2 = u32x2::new(6, 7);
let r: u32x2 = transmute(vmla_laneq_u32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_lane_u32() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let b: u32x4 = u32x4::new(2, 2, 2, 2);
let c: u32x2 = u32x2::new(0, 3);
let e: u32x4 = u32x4::new(6, 7, 8, 9);
let r: u32x4 = transmute(vmlaq_lane_u32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_laneq_u32() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let b: u32x4 = u32x4::new(2, 2, 2, 2);
let c: u32x4 = u32x4::new(0, 3, 0, 0);
let e: u32x4 = u32x4::new(6, 7, 8, 9);
let r: u32x4 = transmute(vmlaq_laneq_u32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_lane_f32() {
let a: f32x2 = f32x2::new(0., 1.);
let b: f32x2 = f32x2::new(2., 2.);
let c: f32x2 = f32x2::new(0., 3.);
let e: f32x2 = f32x2::new(6., 7.);
let r: f32x2 = transmute(vmla_lane_f32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmla_laneq_f32() {
let a: f32x2 = f32x2::new(0., 1.);
let b: f32x2 = f32x2::new(2., 2.);
let c: f32x4 = f32x4::new(0., 3., 0., 0.);
let e: f32x2 = f32x2::new(6., 7.);
let r: f32x2 = transmute(vmla_laneq_f32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_lane_f32() {
let a: f32x4 = f32x4::new(0., 1., 2., 3.);
let b: f32x4 = f32x4::new(2., 2., 2., 2.);
let c: f32x2 = f32x2::new(0., 3.);
let e: f32x4 = f32x4::new(6., 7., 8., 9.);
let r: f32x4 = transmute(vmlaq_lane_f32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlaq_laneq_f32() {
let a: f32x4 = f32x4::new(0., 1., 2., 3.);
let b: f32x4 = f32x4::new(2., 2., 2., 2.);
let c: f32x4 = f32x4::new(0., 3., 0., 0.);
let e: f32x4 = f32x4::new(6., 7., 8., 9.);
let r: f32x4 = transmute(vmlaq_laneq_f32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
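// Note: `vmlal` is the widening (long) multiply-accumulate: the products of
// the half-width `b` and `c` are accumulated into the double-width `a`.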
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_s8() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: i8x8 = i8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: i8x8 = i8x8::new(3, 3, 3, 3, 3, 3, 3, 3);
let e: i16x8 = i16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let r: i16x8 = transmute(vmlal_s8(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_s16() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let c: i16x4 = i16x4::new(3, 3, 3, 3);
let e: i32x4 = i32x4::new(6, 7, 8, 9);
let r: i32x4 = transmute(vmlal_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_s32() {
let a: i64x2 = i64x2::new(0, 1);
let b: i32x2 = i32x2::new(2, 2);
let c: i32x2 = i32x2::new(3, 3);
let e: i64x2 = i64x2::new(6, 7);
let r: i64x2 = transmute(vmlal_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_u8() {
let a: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: u8x8 = u8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: u8x8 = u8x8::new(3, 3, 3, 3, 3, 3, 3, 3);
let e: u16x8 = u16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let r: u16x8 = transmute(vmlal_u8(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_u16() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let c: u16x4 = u16x4::new(3, 3, 3, 3);
let e: u32x4 = u32x4::new(6, 7, 8, 9);
let r: u32x4 = transmute(vmlal_u16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_u32() {
let a: u64x2 = u64x2::new(0, 1);
let b: u32x2 = u32x2::new(2, 2);
let c: u32x2 = u32x2::new(3, 3);
let e: u64x2 = u64x2::new(6, 7);
let r: u64x2 = transmute(vmlal_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_n_s16() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let c: i16 = 3;
let e: i32x4 = i32x4::new(6, 7, 8, 9);
let r: i32x4 = transmute(vmlal_n_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_n_s32() {
let a: i64x2 = i64x2::new(0, 1);
let b: i32x2 = i32x2::new(2, 2);
let c: i32 = 3;
let e: i64x2 = i64x2::new(6, 7);
let r: i64x2 = transmute(vmlal_n_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_n_u16() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let c: u16 = 3;
let e: u32x4 = u32x4::new(6, 7, 8, 9);
let r: u32x4 = transmute(vmlal_n_u16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_n_u32() {
let a: u64x2 = u64x2::new(0, 1);
let b: u32x2 = u32x2::new(2, 2);
let c: u32 = 3;
let e: u64x2 = u64x2::new(6, 7);
let r: u64x2 = transmute(vmlal_n_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_lane_s16() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let c: i16x4 = i16x4::new(0, 3, 0, 0);
let e: i32x4 = i32x4::new(6, 7, 8, 9);
let r: i32x4 = transmute(vmlal_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_laneq_s16() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let c: i16x8 = i16x8::new(0, 3, 0, 0, 0, 0, 0, 0);
let e: i32x4 = i32x4::new(6, 7, 8, 9);
let r: i32x4 = transmute(vmlal_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_lane_s32() {
let a: i64x2 = i64x2::new(0, 1);
let b: i32x2 = i32x2::new(2, 2);
let c: i32x2 = i32x2::new(0, 3);
let e: i64x2 = i64x2::new(6, 7);
let r: i64x2 = transmute(vmlal_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_laneq_s32() {
let a: i64x2 = i64x2::new(0, 1);
let b: i32x2 = i32x2::new(2, 2);
let c: i32x4 = i32x4::new(0, 3, 0, 0);
let e: i64x2 = i64x2::new(6, 7);
let r: i64x2 = transmute(vmlal_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_lane_u16() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let c: u16x4 = u16x4::new(0, 3, 0, 0);
let e: u32x4 = u32x4::new(6, 7, 8, 9);
let r: u32x4 = transmute(vmlal_lane_u16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_laneq_u16() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let c: u16x8 = u16x8::new(0, 3, 0, 0, 0, 0, 0, 0);
let e: u32x4 = u32x4::new(6, 7, 8, 9);
let r: u32x4 = transmute(vmlal_laneq_u16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_lane_u32() {
let a: u64x2 = u64x2::new(0, 1);
let b: u32x2 = u32x2::new(2, 2);
let c: u32x2 = u32x2::new(0, 3);
let e: u64x2 = u64x2::new(6, 7);
let r: u64x2 = transmute(vmlal_lane_u32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlal_laneq_u32() {
let a: u64x2 = u64x2::new(0, 1);
let b: u32x2 = u32x2::new(2, 2);
let c: u32x4 = u32x4::new(0, 3, 0, 0);
let e: u64x2 = u64x2::new(6, 7);
let r: u64x2 = transmute(vmlal_laneq_u32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
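// Note: `vmls` is the multiply-subtract counterpart, `r = a - b * c`, with
// the same `_n_` (scalar) and `_lane_`/`_laneq_` (selected lane) variants.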
#[simd_test(enable = "neon")]
unsafe fn test_vmls_s8() {
let a: i8x8 = i8x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let b: i8x8 = i8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: i8x8 = i8x8::new(3, 3, 3, 3, 3, 3, 3, 3);
let e: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i8x8 = transmute(vmls_s8(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_s8() {
let a: i8x16 = i8x16::new(6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21);
let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let c: i8x16 = i8x16::new(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3);
let e: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let r: i8x16 = transmute(vmlsq_s8(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_s16() {
let a: i16x4 = i16x4::new(6, 7, 8, 9);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let c: i16x4 = i16x4::new(3, 3, 3, 3);
let e: i16x4 = i16x4::new(0, 1, 2, 3);
let r: i16x4 = transmute(vmls_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_s16() {
let a: i16x8 = i16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: i16x8 = i16x8::new(3, 3, 3, 3, 3, 3, 3, 3);
let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i16x8 = transmute(vmlsq_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_s32() {
let a: i32x2 = i32x2::new(6, 7);
let b: i32x2 = i32x2::new(2, 2);
let c: i32x2 = i32x2::new(3, 3);
let e: i32x2 = i32x2::new(0, 1);
let r: i32x2 = transmute(vmls_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_s32() {
let a: i32x4 = i32x4::new(6, 7, 8, 9);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let c: i32x4 = i32x4::new(3, 3, 3, 3);
let e: i32x4 = i32x4::new(0, 1, 2, 3);
let r: i32x4 = transmute(vmlsq_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_u8() {
let a: u8x8 = u8x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let b: u8x8 = u8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: u8x8 = u8x8::new(3, 3, 3, 3, 3, 3, 3, 3);
let e: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u8x8 = transmute(vmls_u8(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_u8() {
let a: u8x16 = u8x16::new(6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21);
let b: u8x16 = u8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let c: u8x16 = u8x16::new(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3);
let e: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let r: u8x16 = transmute(vmlsq_u8(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_u16() {
let a: u16x4 = u16x4::new(6, 7, 8, 9);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let c: u16x4 = u16x4::new(3, 3, 3, 3);
let e: u16x4 = u16x4::new(0, 1, 2, 3);
let r: u16x4 = transmute(vmls_u16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_u16() {
let a: u16x8 = u16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let b: u16x8 = u16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: u16x8 = u16x8::new(3, 3, 3, 3, 3, 3, 3, 3);
let e: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u16x8 = transmute(vmlsq_u16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_u32() {
let a: u32x2 = u32x2::new(6, 7);
let b: u32x2 = u32x2::new(2, 2);
let c: u32x2 = u32x2::new(3, 3);
let e: u32x2 = u32x2::new(0, 1);
let r: u32x2 = transmute(vmls_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_u32() {
let a: u32x4 = u32x4::new(6, 7, 8, 9);
let b: u32x4 = u32x4::new(2, 2, 2, 2);
let c: u32x4 = u32x4::new(3, 3, 3, 3);
let e: u32x4 = u32x4::new(0, 1, 2, 3);
let r: u32x4 = transmute(vmlsq_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_f32() {
let a: f32x2 = f32x2::new(6., 7.);
let b: f32x2 = f32x2::new(2., 2.);
let c: f32x2 = f32x2::new(3., 3.);
let e: f32x2 = f32x2::new(0., 1.);
let r: f32x2 = transmute(vmls_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_f32() {
let a: f32x4 = f32x4::new(6., 7., 8., 9.);
let b: f32x4 = f32x4::new(2., 2., 2., 2.);
let c: f32x4 = f32x4::new(3., 3., 3., 3.);
let e: f32x4 = f32x4::new(0., 1., 2., 3.);
let r: f32x4 = transmute(vmlsq_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_n_s16() {
let a: i16x4 = i16x4::new(6, 7, 8, 9);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let c: i16 = 3;
let e: i16x4 = i16x4::new(0, 1, 2, 3);
let r: i16x4 = transmute(vmls_n_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_n_s16() {
let a: i16x8 = i16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: i16 = 3;
let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i16x8 = transmute(vmlsq_n_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_n_s32() {
let a: i32x2 = i32x2::new(6, 7);
let b: i32x2 = i32x2::new(2, 2);
let c: i32 = 3;
let e: i32x2 = i32x2::new(0, 1);
let r: i32x2 = transmute(vmls_n_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_n_s32() {
let a: i32x4 = i32x4::new(6, 7, 8, 9);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let c: i32 = 3;
let e: i32x4 = i32x4::new(0, 1, 2, 3);
let r: i32x4 = transmute(vmlsq_n_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_n_u16() {
let a: u16x4 = u16x4::new(6, 7, 8, 9);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let c: u16 = 3;
let e: u16x4 = u16x4::new(0, 1, 2, 3);
let r: u16x4 = transmute(vmls_n_u16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_n_u16() {
let a: u16x8 = u16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let b: u16x8 = u16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: u16 = 3;
let e: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u16x8 = transmute(vmlsq_n_u16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_n_u32() {
let a: u32x2 = u32x2::new(6, 7);
let b: u32x2 = u32x2::new(2, 2);
let c: u32 = 3;
let e: u32x2 = u32x2::new(0, 1);
let r: u32x2 = transmute(vmls_n_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_n_u32() {
let a: u32x4 = u32x4::new(6, 7, 8, 9);
let b: u32x4 = u32x4::new(2, 2, 2, 2);
let c: u32 = 3;
let e: u32x4 = u32x4::new(0, 1, 2, 3);
let r: u32x4 = transmute(vmlsq_n_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_n_f32() {
let a: f32x2 = f32x2::new(6., 7.);
let b: f32x2 = f32x2::new(2., 2.);
let c: f32 = 3.;
let e: f32x2 = f32x2::new(0., 1.);
let r: f32x2 = transmute(vmls_n_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_n_f32() {
let a: f32x4 = f32x4::new(6., 7., 8., 9.);
let b: f32x4 = f32x4::new(2., 2., 2., 2.);
let c: f32 = 3.;
let e: f32x4 = f32x4::new(0., 1., 2., 3.);
let r: f32x4 = transmute(vmlsq_n_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_lane_s16() {
let a: i16x4 = i16x4::new(6, 7, 8, 9);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let c: i16x4 = i16x4::new(0, 3, 0, 0);
let e: i16x4 = i16x4::new(0, 1, 2, 3);
let r: i16x4 = transmute(vmls_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_laneq_s16() {
let a: i16x4 = i16x4::new(6, 7, 8, 9);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let c: i16x8 = i16x8::new(0, 3, 0, 0, 0, 0, 0, 0);
let e: i16x4 = i16x4::new(0, 1, 2, 3);
let r: i16x4 = transmute(vmls_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_lane_s16() {
let a: i16x8 = i16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: i16x4 = i16x4::new(0, 3, 0, 0);
let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i16x8 = transmute(vmlsq_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_laneq_s16() {
let a: i16x8 = i16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: i16x8 = i16x8::new(0, 3, 0, 0, 0, 0, 0, 0);
let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i16x8 = transmute(vmlsq_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_lane_s32() {
let a: i32x2 = i32x2::new(6, 7);
let b: i32x2 = i32x2::new(2, 2);
let c: i32x2 = i32x2::new(0, 3);
let e: i32x2 = i32x2::new(0, 1);
let r: i32x2 = transmute(vmls_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_laneq_s32() {
let a: i32x2 = i32x2::new(6, 7);
let b: i32x2 = i32x2::new(2, 2);
let c: i32x4 = i32x4::new(0, 3, 0, 0);
let e: i32x2 = i32x2::new(0, 1);
let r: i32x2 = transmute(vmls_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_lane_s32() {
let a: i32x4 = i32x4::new(6, 7, 8, 9);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let c: i32x2 = i32x2::new(0, 3);
let e: i32x4 = i32x4::new(0, 1, 2, 3);
let r: i32x4 = transmute(vmlsq_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_laneq_s32() {
let a: i32x4 = i32x4::new(6, 7, 8, 9);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let c: i32x4 = i32x4::new(0, 3, 0, 0);
let e: i32x4 = i32x4::new(0, 1, 2, 3);
let r: i32x4 = transmute(vmlsq_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_lane_u16() {
let a: u16x4 = u16x4::new(6, 7, 8, 9);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let c: u16x4 = u16x4::new(0, 3, 0, 0);
let e: u16x4 = u16x4::new(0, 1, 2, 3);
let r: u16x4 = transmute(vmls_lane_u16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_laneq_u16() {
let a: u16x4 = u16x4::new(6, 7, 8, 9);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let c: u16x8 = u16x8::new(0, 3, 0, 0, 0, 0, 0, 0);
let e: u16x4 = u16x4::new(0, 1, 2, 3);
let r: u16x4 = transmute(vmls_laneq_u16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_lane_u16() {
let a: u16x8 = u16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let b: u16x8 = u16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: u16x4 = u16x4::new(0, 3, 0, 0);
let e: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u16x8 = transmute(vmlsq_lane_u16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_laneq_u16() {
let a: u16x8 = u16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let b: u16x8 = u16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: u16x8 = u16x8::new(0, 3, 0, 0, 0, 0, 0, 0);
let e: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u16x8 = transmute(vmlsq_laneq_u16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_lane_u32() {
let a: u32x2 = u32x2::new(6, 7);
let b: u32x2 = u32x2::new(2, 2);
let c: u32x2 = u32x2::new(0, 3);
let e: u32x2 = u32x2::new(0, 1);
let r: u32x2 = transmute(vmls_lane_u32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_laneq_u32() {
let a: u32x2 = u32x2::new(6, 7);
let b: u32x2 = u32x2::new(2, 2);
let c: u32x4 = u32x4::new(0, 3, 0, 0);
let e: u32x2 = u32x2::new(0, 1);
let r: u32x2 = transmute(vmls_laneq_u32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_lane_u32() {
let a: u32x4 = u32x4::new(6, 7, 8, 9);
let b: u32x4 = u32x4::new(2, 2, 2, 2);
let c: u32x2 = u32x2::new(0, 3);
let e: u32x4 = u32x4::new(0, 1, 2, 3);
let r: u32x4 = transmute(vmlsq_lane_u32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_laneq_u32() {
let a: u32x4 = u32x4::new(6, 7, 8, 9);
let b: u32x4 = u32x4::new(2, 2, 2, 2);
let c: u32x4 = u32x4::new(0, 3, 0, 0);
let e: u32x4 = u32x4::new(0, 1, 2, 3);
let r: u32x4 = transmute(vmlsq_laneq_u32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_lane_f32() {
let a: f32x2 = f32x2::new(6., 7.);
let b: f32x2 = f32x2::new(2., 2.);
let c: f32x2 = f32x2::new(0., 3.);
let e: f32x2 = f32x2::new(0., 1.);
let r: f32x2 = transmute(vmls_lane_f32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmls_laneq_f32() {
let a: f32x2 = f32x2::new(6., 7.);
let b: f32x2 = f32x2::new(2., 2.);
let c: f32x4 = f32x4::new(0., 3., 0., 0.);
let e: f32x2 = f32x2::new(0., 1.);
let r: f32x2 = transmute(vmls_laneq_f32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_lane_f32() {
let a: f32x4 = f32x4::new(6., 7., 8., 9.);
let b: f32x4 = f32x4::new(2., 2., 2., 2.);
let c: f32x2 = f32x2::new(0., 3.);
let e: f32x4 = f32x4::new(0., 1., 2., 3.);
let r: f32x4 = transmute(vmlsq_lane_f32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsq_laneq_f32() {
let a: f32x4 = f32x4::new(6., 7., 8., 9.);
let b: f32x4 = f32x4::new(2., 2., 2., 2.);
let c: f32x4 = f32x4::new(0., 3., 0., 0.);
let e: f32x4 = f32x4::new(0., 1., 2., 3.);
let r: f32x4 = transmute(vmlsq_laneq_f32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
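// Note: `vmlsl` is the widening multiply-subtract: the half-width products of
// `b` and `c` are subtracted from the double-width accumulator `a`.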
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_s8() {
let a: i16x8 = i16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let b: i8x8 = i8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: i8x8 = i8x8::new(3, 3, 3, 3, 3, 3, 3, 3);
let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i16x8 = transmute(vmlsl_s8(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_s16() {
let a: i32x4 = i32x4::new(6, 7, 8, 9);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let c: i16x4 = i16x4::new(3, 3, 3, 3);
let e: i32x4 = i32x4::new(0, 1, 2, 3);
let r: i32x4 = transmute(vmlsl_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_s32() {
let a: i64x2 = i64x2::new(6, 7);
let b: i32x2 = i32x2::new(2, 2);
let c: i32x2 = i32x2::new(3, 3);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vmlsl_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_u8() {
let a: u16x8 = u16x8::new(6, 7, 8, 9, 10, 11, 12, 13);
let b: u8x8 = u8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let c: u8x8 = u8x8::new(3, 3, 3, 3, 3, 3, 3, 3);
let e: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u16x8 = transmute(vmlsl_u8(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_u16() {
let a: u32x4 = u32x4::new(6, 7, 8, 9);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let c: u16x4 = u16x4::new(3, 3, 3, 3);
let e: u32x4 = u32x4::new(0, 1, 2, 3);
let r: u32x4 = transmute(vmlsl_u16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_u32() {
let a: u64x2 = u64x2::new(6, 7);
let b: u32x2 = u32x2::new(2, 2);
let c: u32x2 = u32x2::new(3, 3);
let e: u64x2 = u64x2::new(0, 1);
let r: u64x2 = transmute(vmlsl_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_n_s16() {
let a: i32x4 = i32x4::new(6, 7, 8, 9);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let c: i16 = 3;
let e: i32x4 = i32x4::new(0, 1, 2, 3);
let r: i32x4 = transmute(vmlsl_n_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_n_s32() {
let a: i64x2 = i64x2::new(6, 7);
let b: i32x2 = i32x2::new(2, 2);
let c: i32 = 3;
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vmlsl_n_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_n_u16() {
let a: u32x4 = u32x4::new(6, 7, 8, 9);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let c: u16 = 3;
let e: u32x4 = u32x4::new(0, 1, 2, 3);
let r: u32x4 = transmute(vmlsl_n_u16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_n_u32() {
let a: u64x2 = u64x2::new(6, 7);
let b: u32x2 = u32x2::new(2, 2);
let c: u32 = 3;
let e: u64x2 = u64x2::new(0, 1);
let r: u64x2 = transmute(vmlsl_n_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_lane_s16() {
let a: i32x4 = i32x4::new(6, 7, 8, 9);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let c: i16x4 = i16x4::new(0, 3, 0, 0);
let e: i32x4 = i32x4::new(0, 1, 2, 3);
let r: i32x4 = transmute(vmlsl_lane_s16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_laneq_s16() {
let a: i32x4 = i32x4::new(6, 7, 8, 9);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let c: i16x8 = i16x8::new(0, 3, 0, 0, 0, 0, 0, 0);
let e: i32x4 = i32x4::new(0, 1, 2, 3);
let r: i32x4 = transmute(vmlsl_laneq_s16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_lane_s32() {
let a: i64x2 = i64x2::new(6, 7);
let b: i32x2 = i32x2::new(2, 2);
let c: i32x2 = i32x2::new(0, 3);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vmlsl_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_laneq_s32() {
let a: i64x2 = i64x2::new(6, 7);
let b: i32x2 = i32x2::new(2, 2);
let c: i32x4 = i32x4::new(0, 3, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vmlsl_laneq_s32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_lane_u16() {
let a: u32x4 = u32x4::new(6, 7, 8, 9);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let c: u16x4 = u16x4::new(0, 3, 0, 0);
let e: u32x4 = u32x4::new(0, 1, 2, 3);
let r: u32x4 = transmute(vmlsl_lane_u16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_laneq_u16() {
let a: u32x4 = u32x4::new(6, 7, 8, 9);
let b: u16x4 = u16x4::new(2, 2, 2, 2);
let c: u16x8 = u16x8::new(0, 3, 0, 0, 0, 0, 0, 0);
let e: u32x4 = u32x4::new(0, 1, 2, 3);
let r: u32x4 = transmute(vmlsl_laneq_u16::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_lane_u32() {
let a: u64x2 = u64x2::new(6, 7);
let b: u32x2 = u32x2::new(2, 2);
let c: u32x2 = u32x2::new(0, 3);
let e: u64x2 = u64x2::new(0, 1);
let r: u64x2 = transmute(vmlsl_lane_u32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmlsl_laneq_u32() {
let a: u64x2 = u64x2::new(6, 7);
let b: u32x2 = u32x2::new(2, 2);
let c: u32x4 = u32x4::new(0, 3, 0, 0);
let e: u64x2 = u64x2::new(0, 1);
let r: u64x2 = transmute(vmlsl_laneq_u32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
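// Note: `vneg` negates each lane with wrapping semantics, while `vqneg`
// saturates: negating the minimum value (e.g. -128 for i8) yields the
// maximum (127) instead of wrapping back to the minimum.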
#[simd_test(enable = "neon")]
unsafe fn test_vneg_s8() {
let a: i8x8 = i8x8::new(0, 1, -1, 2, -2, 3, -3, 4);
let e: i8x8 = i8x8::new(0, -1, 1, -2, 2, -3, 3, -4);
let r: i8x8 = transmute(vneg_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vnegq_s8() {
let a: i8x16 = i8x16::new(0, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8);
let e: i8x16 = i8x16::new(0, -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, -8);
let r: i8x16 = transmute(vnegq_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vneg_s16() {
let a: i16x4 = i16x4::new(0, 1, -1, 2);
let e: i16x4 = i16x4::new(0, -1, 1, -2);
let r: i16x4 = transmute(vneg_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vnegq_s16() {
let a: i16x8 = i16x8::new(0, 1, -1, 2, -2, 3, -3, 4);
let e: i16x8 = i16x8::new(0, -1, 1, -2, 2, -3, 3, -4);
let r: i16x8 = transmute(vnegq_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vneg_s32() {
let a: i32x2 = i32x2::new(0, 1);
let e: i32x2 = i32x2::new(0, -1);
let r: i32x2 = transmute(vneg_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vnegq_s32() {
let a: i32x4 = i32x4::new(0, 1, -1, 2);
let e: i32x4 = i32x4::new(0, -1, 1, -2);
let r: i32x4 = transmute(vnegq_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vneg_f32() {
let a: f32x2 = f32x2::new(0., 1.);
let e: f32x2 = f32x2::new(0., -1.);
let r: f32x2 = transmute(vneg_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vnegq_f32() {
let a: f32x4 = f32x4::new(0., 1., -1., 2.);
let e: f32x4 = f32x4::new(0., -1., 1., -2.);
let r: f32x4 = transmute(vnegq_f32(transmute(a)));
assert_eq!(r, e);
}
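// `vqneg` is saturating negation: the most negative value (e.g. i8::MIN == -128)
// has no two's-complement negation, so it saturates to the type's maximum (0x7F).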
#[simd_test(enable = "neon")]
unsafe fn test_vqneg_s8() {
let a: i8x8 = i8x8::new(-128, 0, 1, -1, 2, -2, 3, -3);
let e: i8x8 = i8x8::new(0x7F, 0, -1, 1, -2, 2, -3, 3);
let r: i8x8 = transmute(vqneg_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqnegq_s8() {
let a: i8x16 = i8x16::new(-128, 0, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7);
let e: i8x16 = i8x16::new(0x7F, 0, -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7);
let r: i8x16 = transmute(vqnegq_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqneg_s16() {
let a: i16x4 = i16x4::new(-32768, 0, 1, -1);
let e: i16x4 = i16x4::new(0x7F_FF, 0, -1, 1);
let r: i16x4 = transmute(vqneg_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqnegq_s16() {
let a: i16x8 = i16x8::new(-32768, 0, 1, -1, 2, -2, 3, -3);
let e: i16x8 = i16x8::new(0x7F_FF, 0, -1, 1, -2, 2, -3, 3);
let r: i16x8 = transmute(vqnegq_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqneg_s32() {
let a: i32x2 = i32x2::new(-2147483648, 0);
let e: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0);
let r: i32x2 = transmute(vqneg_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqnegq_s32() {
let a: i32x4 = i32x4::new(-2147483648, 0, 1, -1);
let e: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0, -1, 1);
let r: i32x4 = transmute(vqnegq_s32(transmute(a)));
assert_eq!(r, e);
}
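// `vqsub` is saturating subtraction; these operands never cross the type's
// bounds, so the results match plain lanewise `a - b`.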
#[simd_test(enable = "neon")]
unsafe fn test_vqsub_u8() {
let a: u8x8 = u8x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u8x8 = u8x8::new(41, 40, 39, 38, 37, 36, 35, 34);
let r: u8x8 = transmute(vqsub_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqsubq_u8() {
let a: u8x16 = u8x16::new(42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42);
let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: u8x16 = u8x16::new(41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26);
let r: u8x16 = transmute(vqsubq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqsub_u16() {
let a: u16x4 = u16x4::new(42, 42, 42, 42);
let b: u16x4 = u16x4::new(1, 2, 3, 4);
let e: u16x4 = u16x4::new(41, 40, 39, 38);
let r: u16x4 = transmute(vqsub_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqsubq_u16() {
let a: u16x8 = u16x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let b: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u16x8 = u16x8::new(41, 40, 39, 38, 37, 36, 35, 34);
let r: u16x8 = transmute(vqsubq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqsub_u32() {
let a: u32x2 = u32x2::new(42, 42);
let b: u32x2 = u32x2::new(1, 2);
let e: u32x2 = u32x2::new(41, 40);
let r: u32x2 = transmute(vqsub_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqsubq_u32() {
let a: u32x4 = u32x4::new(42, 42, 42, 42);
let b: u32x4 = u32x4::new(1, 2, 3, 4);
let e: u32x4 = u32x4::new(41, 40, 39, 38);
let r: u32x4 = transmute(vqsubq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqsub_u64() {
let a: u64x1 = u64x1::new(42);
let b: u64x1 = u64x1::new(1);
let e: u64x1 = u64x1::new(41);
let r: u64x1 = transmute(vqsub_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqsubq_u64() {
let a: u64x2 = u64x2::new(42, 42);
let b: u64x2 = u64x2::new(1, 2);
let e: u64x2 = u64x2::new(41, 40);
let r: u64x2 = transmute(vqsubq_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqsub_s8() {
let a: i8x8 = i8x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: i8x8 = i8x8::new(41, 40, 39, 38, 37, 36, 35, 34);
let r: i8x8 = transmute(vqsub_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqsubq_s8() {
let a: i8x16 = i8x16::new(42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42);
let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: i8x16 = i8x16::new(41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26);
let r: i8x16 = transmute(vqsubq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqsub_s16() {
let a: i16x4 = i16x4::new(42, 42, 42, 42);
let b: i16x4 = i16x4::new(1, 2, 3, 4);
let e: i16x4 = i16x4::new(41, 40, 39, 38);
let r: i16x4 = transmute(vqsub_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqsubq_s16() {
let a: i16x8 = i16x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let b: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: i16x8 = i16x8::new(41, 40, 39, 38, 37, 36, 35, 34);
let r: i16x8 = transmute(vqsubq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqsub_s32() {
let a: i32x2 = i32x2::new(42, 42);
let b: i32x2 = i32x2::new(1, 2);
let e: i32x2 = i32x2::new(41, 40);
let r: i32x2 = transmute(vqsub_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqsubq_s32() {
let a: i32x4 = i32x4::new(42, 42, 42, 42);
let b: i32x4 = i32x4::new(1, 2, 3, 4);
let e: i32x4 = i32x4::new(41, 40, 39, 38);
let r: i32x4 = transmute(vqsubq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqsub_s64() {
let a: i64x1 = i64x1::new(42);
let b: i64x1 = i64x1::new(1);
let e: i64x1 = i64x1::new(41);
let r: i64x1 = transmute(vqsub_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqsubq_s64() {
let a: i64x2 = i64x2::new(42, 42);
let b: i64x2 = i64x2::new(1, 2);
let e: i64x2 = i64x2::new(41, 40);
let r: i64x2 = transmute(vqsubq_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
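// `vhadd` is the halving add: each lane is `(a + b) >> 1`, computed without
// intermediate overflow and dropping the low bit, e.g. (42 + 1) >> 1 == 21.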
#[simd_test(enable = "neon")]
unsafe fn test_vhadd_u8() {
let a: u8x8 = u8x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u8x8 = u8x8::new(21, 22, 22, 23, 23, 24, 24, 25);
let r: u8x8 = transmute(vhadd_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhaddq_u8() {
let a: u8x16 = u8x16::new(42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42);
let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: u8x16 = u8x16::new(21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29);
let r: u8x16 = transmute(vhaddq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhadd_u16() {
let a: u16x4 = u16x4::new(42, 42, 42, 42);
let b: u16x4 = u16x4::new(1, 2, 3, 4);
let e: u16x4 = u16x4::new(21, 22, 22, 23);
let r: u16x4 = transmute(vhadd_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhaddq_u16() {
let a: u16x8 = u16x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let b: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u16x8 = u16x8::new(21, 22, 22, 23, 23, 24, 24, 25);
let r: u16x8 = transmute(vhaddq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhadd_u32() {
let a: u32x2 = u32x2::new(42, 42);
let b: u32x2 = u32x2::new(1, 2);
let e: u32x2 = u32x2::new(21, 22);
let r: u32x2 = transmute(vhadd_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhaddq_u32() {
let a: u32x4 = u32x4::new(42, 42, 42, 42);
let b: u32x4 = u32x4::new(1, 2, 3, 4);
let e: u32x4 = u32x4::new(21, 22, 22, 23);
let r: u32x4 = transmute(vhaddq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhadd_s8() {
let a: i8x8 = i8x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: i8x8 = i8x8::new(21, 22, 22, 23, 23, 24, 24, 25);
let r: i8x8 = transmute(vhadd_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhaddq_s8() {
let a: i8x16 = i8x16::new(42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42);
let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: i8x16 = i8x16::new(21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29);
let r: i8x16 = transmute(vhaddq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhadd_s16() {
let a: i16x4 = i16x4::new(42, 42, 42, 42);
let b: i16x4 = i16x4::new(1, 2, 3, 4);
let e: i16x4 = i16x4::new(21, 22, 22, 23);
let r: i16x4 = transmute(vhadd_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhaddq_s16() {
let a: i16x8 = i16x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let b: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: i16x8 = i16x8::new(21, 22, 22, 23, 23, 24, 24, 25);
let r: i16x8 = transmute(vhaddq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhadd_s32() {
let a: i32x2 = i32x2::new(42, 42);
let b: i32x2 = i32x2::new(1, 2);
let e: i32x2 = i32x2::new(21, 22);
let r: i32x2 = transmute(vhadd_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhaddq_s32() {
let a: i32x4 = i32x4::new(42, 42, 42, 42);
let b: i32x4 = i32x4::new(1, 2, 3, 4);
let e: i32x4 = i32x4::new(21, 22, 22, 23);
let r: i32x4 = transmute(vhaddq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
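// `vrhadd` is the rounding halving add, `(a + b + 1) >> 1`; compare
// (42 + 1 + 1) >> 1 == 22 here with the truncating 21 from `vhadd` above.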
#[simd_test(enable = "neon")]
unsafe fn test_vrhadd_u8() {
let a: u8x8 = u8x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u8x8 = u8x8::new(22, 22, 23, 23, 24, 24, 25, 25);
let r: u8x8 = transmute(vrhadd_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrhaddq_u8() {
let a: u8x16 = u8x16::new(42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42);
let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: u8x16 = u8x16::new(22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29);
let r: u8x16 = transmute(vrhaddq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrhadd_u16() {
let a: u16x4 = u16x4::new(42, 42, 42, 42);
let b: u16x4 = u16x4::new(1, 2, 3, 4);
let e: u16x4 = u16x4::new(22, 22, 23, 23);
let r: u16x4 = transmute(vrhadd_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrhaddq_u16() {
let a: u16x8 = u16x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let b: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u16x8 = u16x8::new(22, 22, 23, 23, 24, 24, 25, 25);
let r: u16x8 = transmute(vrhaddq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrhadd_u32() {
let a: u32x2 = u32x2::new(42, 42);
let b: u32x2 = u32x2::new(1, 2);
let e: u32x2 = u32x2::new(22, 22);
let r: u32x2 = transmute(vrhadd_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrhaddq_u32() {
let a: u32x4 = u32x4::new(42, 42, 42, 42);
let b: u32x4 = u32x4::new(1, 2, 3, 4);
let e: u32x4 = u32x4::new(22, 22, 23, 23);
let r: u32x4 = transmute(vrhaddq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrhadd_s8() {
let a: i8x8 = i8x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: i8x8 = i8x8::new(22, 22, 23, 23, 24, 24, 25, 25);
let r: i8x8 = transmute(vrhadd_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrhaddq_s8() {
let a: i8x16 = i8x16::new(42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42);
let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: i8x16 = i8x16::new(22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29);
let r: i8x16 = transmute(vrhaddq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrhadd_s16() {
let a: i16x4 = i16x4::new(42, 42, 42, 42);
let b: i16x4 = i16x4::new(1, 2, 3, 4);
let e: i16x4 = i16x4::new(22, 22, 23, 23);
let r: i16x4 = transmute(vrhadd_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrhaddq_s16() {
let a: i16x8 = i16x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let b: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: i16x8 = i16x8::new(22, 22, 23, 23, 24, 24, 25, 25);
let r: i16x8 = transmute(vrhaddq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrhadd_s32() {
let a: i32x2 = i32x2::new(42, 42);
let b: i32x2 = i32x2::new(1, 2);
let e: i32x2 = i32x2::new(22, 22);
let r: i32x2 = transmute(vrhadd_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrhaddq_s32() {
let a: i32x4 = i32x4::new(42, 42, 42, 42);
let b: i32x4 = i32x4::new(1, 2, 3, 4);
let e: i32x4 = i32x4::new(22, 22, 23, 23);
let r: i32x4 = transmute(vrhaddq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
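// `vrndn` rounds to the nearest integral value with ties going to even:
// 0.5 -> 0.0 and 2.5 -> 2.0, while -1.5 -> -2.0.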
#[simd_test(enable = "neon")]
unsafe fn test_vrndn_f32() {
let a: f32x2 = f32x2::new(-1.5, 0.5);
let e: f32x2 = f32x2::new(-2.0, 0.0);
let r: f32x2 = transmute(vrndn_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrndnq_f32() {
let a: f32x4 = f32x4::new(-1.5, 0.5, 1.5, 2.5);
let e: f32x4 = f32x4::new(-2.0, 0.0, 2.0, 2.0);
let r: f32x4 = transmute(vrndnq_f32(transmute(a)));
assert_eq!(r, e);
}
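// `vqadd` is saturating addition; no lane saturates with these small operands,
// so the results match plain lanewise `a + b`.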
#[simd_test(enable = "neon")]
unsafe fn test_vqadd_u8() {
let a: u8x8 = u8x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u8x8 = u8x8::new(43, 44, 45, 46, 47, 48, 49, 50);
let r: u8x8 = transmute(vqadd_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqaddq_u8() {
let a: u8x16 = u8x16::new(42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42);
let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: u8x16 = u8x16::new(43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58);
let r: u8x16 = transmute(vqaddq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqadd_u16() {
let a: u16x4 = u16x4::new(42, 42, 42, 42);
let b: u16x4 = u16x4::new(1, 2, 3, 4);
let e: u16x4 = u16x4::new(43, 44, 45, 46);
let r: u16x4 = transmute(vqadd_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqaddq_u16() {
let a: u16x8 = u16x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let b: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u16x8 = u16x8::new(43, 44, 45, 46, 47, 48, 49, 50);
let r: u16x8 = transmute(vqaddq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqadd_u32() {
let a: u32x2 = u32x2::new(42, 42);
let b: u32x2 = u32x2::new(1, 2);
let e: u32x2 = u32x2::new(43, 44);
let r: u32x2 = transmute(vqadd_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqaddq_u32() {
let a: u32x4 = u32x4::new(42, 42, 42, 42);
let b: u32x4 = u32x4::new(1, 2, 3, 4);
let e: u32x4 = u32x4::new(43, 44, 45, 46);
let r: u32x4 = transmute(vqaddq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqadd_u64() {
let a: u64x1 = u64x1::new(42);
let b: u64x1 = u64x1::new(1);
let e: u64x1 = u64x1::new(43);
let r: u64x1 = transmute(vqadd_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqaddq_u64() {
let a: u64x2 = u64x2::new(42, 42);
let b: u64x2 = u64x2::new(1, 2);
let e: u64x2 = u64x2::new(43, 44);
let r: u64x2 = transmute(vqaddq_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqadd_s8() {
let a: i8x8 = i8x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: i8x8 = i8x8::new(43, 44, 45, 46, 47, 48, 49, 50);
let r: i8x8 = transmute(vqadd_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqaddq_s8() {
let a: i8x16 = i8x16::new(42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42);
let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: i8x16 = i8x16::new(43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58);
let r: i8x16 = transmute(vqaddq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqadd_s16() {
let a: i16x4 = i16x4::new(42, 42, 42, 42);
let b: i16x4 = i16x4::new(1, 2, 3, 4);
let e: i16x4 = i16x4::new(43, 44, 45, 46);
let r: i16x4 = transmute(vqadd_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqaddq_s16() {
let a: i16x8 = i16x8::new(42, 42, 42, 42, 42, 42, 42, 42);
let b: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: i16x8 = i16x8::new(43, 44, 45, 46, 47, 48, 49, 50);
let r: i16x8 = transmute(vqaddq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqadd_s32() {
let a: i32x2 = i32x2::new(42, 42);
let b: i32x2 = i32x2::new(1, 2);
let e: i32x2 = i32x2::new(43, 44);
let r: i32x2 = transmute(vqadd_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqaddq_s32() {
let a: i32x4 = i32x4::new(42, 42, 42, 42);
let b: i32x4 = i32x4::new(1, 2, 3, 4);
let e: i32x4 = i32x4::new(43, 44, 45, 46);
let r: i32x4 = transmute(vqaddq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqadd_s64() {
let a: i64x1 = i64x1::new(42);
let b: i64x1 = i64x1::new(1);
let e: i64x1 = i64x1::new(43);
let r: i64x1 = transmute(vqadd_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqaddq_s64() {
let a: i64x2 = i64x2::new(42, 42);
let b: i64x2 = i64x2::new(1, 2);
let e: i64x2 = i64x2::new(43, 44);
let r: i64x2 = transmute(vqaddq_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
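// The `vld1*_x2`/`_x3`/`_x4` tests load 2, 3, or 4 contiguous vectors starting
// at `a[1..]`; the leading 0 in `a` must not show up in any result vector.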
#[simd_test(enable = "neon")]
unsafe fn test_vld1_s8_x2() {
let a: [i8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [i8x8; 2] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16)];
let r: [i8x8; 2] = transmute(vld1_s8_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_s16_x2() {
let a: [i16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [i16x4; 2] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8)];
let r: [i16x4; 2] = transmute(vld1_s16_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_s32_x2() {
let a: [i32; 5] = [0, 1, 2, 3, 4];
let e: [i32x2; 2] = [i32x2::new(1, 2), i32x2::new(3, 4)];
let r: [i32x2; 2] = transmute(vld1_s32_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_s64_x2() {
let a: [i64; 3] = [0, 1, 2];
let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(2)];
let r: [i64x1; 2] = transmute(vld1_s64_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_s8_x2() {
let a: [i8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [i8x16; 2] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)];
let r: [i8x16; 2] = transmute(vld1q_s8_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_s16_x2() {
let a: [i16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [i16x8; 2] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16)];
let r: [i16x8; 2] = transmute(vld1q_s16_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_s32_x2() {
let a: [i32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [i32x4; 2] = [i32x4::new(1, 2, 3, 4), i32x4::new(5, 6, 7, 8)];
let r: [i32x4; 2] = transmute(vld1q_s32_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_s64_x2() {
let a: [i64; 5] = [0, 1, 2, 3, 4];
let e: [i64x2; 2] = [i64x2::new(1, 2), i64x2::new(3, 4)];
let r: [i64x2; 2] = transmute(vld1q_s64_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_s8_x3() {
let a: [i8; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let e: [i8x8; 3] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16), i8x8::new(17, 18, 19, 20, 21, 22, 23, 24)];
let r: [i8x8; 3] = transmute(vld1_s8_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_s16_x3() {
let a: [i16; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let e: [i16x4; 3] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8), i16x4::new(9, 10, 11, 12)];
let r: [i16x4; 3] = transmute(vld1_s16_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_s32_x3() {
let a: [i32; 7] = [0, 1, 2, 3, 4, 5, 6];
let e: [i32x2; 3] = [i32x2::new(1, 2), i32x2::new(3, 4), i32x2::new(5, 6)];
let r: [i32x2; 3] = transmute(vld1_s32_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_s64_x3() {
let a: [i64; 4] = [0, 1, 2, 3];
let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(2), i64x1::new(3)];
let r: [i64x1; 3] = transmute(vld1_s64_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_s8_x3() {
let a: [i8; 49] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [i8x16; 3] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)];
let r: [i8x16; 3] = transmute(vld1q_s8_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_s16_x3() {
let a: [i16; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let e: [i16x8; 3] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16), i16x8::new(17, 18, 19, 20, 21, 22, 23, 24)];
let r: [i16x8; 3] = transmute(vld1q_s16_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_s32_x3() {
let a: [i32; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let e: [i32x4; 3] = [i32x4::new(1, 2, 3, 4), i32x4::new(5, 6, 7, 8), i32x4::new(9, 10, 11, 12)];
let r: [i32x4; 3] = transmute(vld1q_s32_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_s64_x3() {
let a: [i64; 7] = [0, 1, 2, 3, 4, 5, 6];
let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(3, 4), i64x2::new(5, 6)];
let r: [i64x2; 3] = transmute(vld1q_s64_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_s8_x4() {
let a: [i8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [i8x8; 4] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16), i8x8::new(17, 18, 19, 20, 21, 22, 23, 24), i8x8::new(25, 26, 27, 28, 29, 30, 31, 32)];
let r: [i8x8; 4] = transmute(vld1_s8_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_s16_x4() {
let a: [i16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [i16x4; 4] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8), i16x4::new(9, 10, 11, 12), i16x4::new(13, 14, 15, 16)];
let r: [i16x4; 4] = transmute(vld1_s16_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_s32_x4() {
let a: [i32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [i32x2; 4] = [i32x2::new(1, 2), i32x2::new(3, 4), i32x2::new(5, 6), i32x2::new(7, 8)];
let r: [i32x2; 4] = transmute(vld1_s32_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_s64_x4() {
let a: [i64; 5] = [0, 1, 2, 3, 4];
let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(2), i64x1::new(3), i64x1::new(4)];
let r: [i64x1; 4] = transmute(vld1_s64_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_s8_x4() {
let a: [i8; 65] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [i8x16; 4] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)];
let r: [i8x16; 4] = transmute(vld1q_s8_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_s16_x4() {
let a: [i16; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [i16x8; 4] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16), i16x8::new(17, 18, 19, 20, 21, 22, 23, 24), i16x8::new(25, 26, 27, 28, 29, 30, 31, 32)];
let r: [i16x8; 4] = transmute(vld1q_s16_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_s32_x4() {
let a: [i32; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [i32x4; 4] = [i32x4::new(1, 2, 3, 4), i32x4::new(5, 6, 7, 8), i32x4::new(9, 10, 11, 12), i32x4::new(13, 14, 15, 16)];
let r: [i32x4; 4] = transmute(vld1q_s32_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_s64_x4() {
let a: [i64; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(3, 4), i64x2::new(5, 6), i64x2::new(7, 8)];
let r: [i64x2; 4] = transmute(vld1q_s64_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_u8_x2() {
let a: [u8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [u8x8; 2] = [u8x8::new(1, 2, 3, 4, 5, 6, 7, 8), u8x8::new(9, 10, 11, 12, 13, 14, 15, 16)];
let r: [u8x8; 2] = transmute(vld1_u8_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_u16_x2() {
let a: [u16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [u16x4; 2] = [u16x4::new(1, 2, 3, 4), u16x4::new(5, 6, 7, 8)];
let r: [u16x4; 2] = transmute(vld1_u16_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_u32_x2() {
let a: [u32; 5] = [0, 1, 2, 3, 4];
let e: [u32x2; 2] = [u32x2::new(1, 2), u32x2::new(3, 4)];
let r: [u32x2; 2] = transmute(vld1_u32_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_u64_x2() {
let a: [u64; 3] = [0, 1, 2];
let e: [u64x1; 2] = [u64x1::new(1), u64x1::new(2)];
let r: [u64x1; 2] = transmute(vld1_u64_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_u8_x2() {
let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [u8x16; 2] = [u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), u8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)];
let r: [u8x16; 2] = transmute(vld1q_u8_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_u16_x2() {
let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [u16x8; 2] = [u16x8::new(1, 2, 3, 4, 5, 6, 7, 8), u16x8::new(9, 10, 11, 12, 13, 14, 15, 16)];
let r: [u16x8; 2] = transmute(vld1q_u16_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_u32_x2() {
let a: [u32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [u32x4; 2] = [u32x4::new(1, 2, 3, 4), u32x4::new(5, 6, 7, 8)];
let r: [u32x4; 2] = transmute(vld1q_u32_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_u64_x2() {
let a: [u64; 5] = [0, 1, 2, 3, 4];
let e: [u64x2; 2] = [u64x2::new(1, 2), u64x2::new(3, 4)];
let r: [u64x2; 2] = transmute(vld1q_u64_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_u8_x3() {
let a: [u8; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let e: [u8x8; 3] = [u8x8::new(1, 2, 3, 4, 5, 6, 7, 8), u8x8::new(9, 10, 11, 12, 13, 14, 15, 16), u8x8::new(17, 18, 19, 20, 21, 22, 23, 24)];
let r: [u8x8; 3] = transmute(vld1_u8_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_u16_x3() {
let a: [u16; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let e: [u16x4; 3] = [u16x4::new(1, 2, 3, 4), u16x4::new(5, 6, 7, 8), u16x4::new(9, 10, 11, 12)];
let r: [u16x4; 3] = transmute(vld1_u16_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_u32_x3() {
let a: [u32; 7] = [0, 1, 2, 3, 4, 5, 6];
let e: [u32x2; 3] = [u32x2::new(1, 2), u32x2::new(3, 4), u32x2::new(5, 6)];
let r: [u32x2; 3] = transmute(vld1_u32_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_u64_x3() {
let a: [u64; 4] = [0, 1, 2, 3];
let e: [u64x1; 3] = [u64x1::new(1), u64x1::new(2), u64x1::new(3)];
let r: [u64x1; 3] = transmute(vld1_u64_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_u8_x3() {
let a: [u8; 49] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [u8x16; 3] = [u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), u8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)];
let r: [u8x16; 3] = transmute(vld1q_u8_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_u16_x3() {
let a: [u16; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let e: [u16x8; 3] = [u16x8::new(1, 2, 3, 4, 5, 6, 7, 8), u16x8::new(9, 10, 11, 12, 13, 14, 15, 16), u16x8::new(17, 18, 19, 20, 21, 22, 23, 24)];
let r: [u16x8; 3] = transmute(vld1q_u16_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_u32_x3() {
let a: [u32; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let e: [u32x4; 3] = [u32x4::new(1, 2, 3, 4), u32x4::new(5, 6, 7, 8), u32x4::new(9, 10, 11, 12)];
let r: [u32x4; 3] = transmute(vld1q_u32_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_u64_x3() {
let a: [u64; 7] = [0, 1, 2, 3, 4, 5, 6];
let e: [u64x2; 3] = [u64x2::new(1, 2), u64x2::new(3, 4), u64x2::new(5, 6)];
let r: [u64x2; 3] = transmute(vld1q_u64_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_u8_x4() {
let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [u8x8; 4] = [u8x8::new(1, 2, 3, 4, 5, 6, 7, 8), u8x8::new(9, 10, 11, 12, 13, 14, 15, 16), u8x8::new(17, 18, 19, 20, 21, 22, 23, 24), u8x8::new(25, 26, 27, 28, 29, 30, 31, 32)];
let r: [u8x8; 4] = transmute(vld1_u8_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_u16_x4() {
let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [u16x4; 4] = [u16x4::new(1, 2, 3, 4), u16x4::new(5, 6, 7, 8), u16x4::new(9, 10, 11, 12), u16x4::new(13, 14, 15, 16)];
let r: [u16x4; 4] = transmute(vld1_u16_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_u32_x4() {
let a: [u32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [u32x2; 4] = [u32x2::new(1, 2), u32x2::new(3, 4), u32x2::new(5, 6), u32x2::new(7, 8)];
let r: [u32x2; 4] = transmute(vld1_u32_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_u64_x4() {
let a: [u64; 5] = [0, 1, 2, 3, 4];
let e: [u64x1; 4] = [u64x1::new(1), u64x1::new(2), u64x1::new(3), u64x1::new(4)];
let r: [u64x1; 4] = transmute(vld1_u64_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_u8_x4() {
let a: [u8; 65] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [u8x16; 4] = [u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), u8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), u8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)];
let r: [u8x16; 4] = transmute(vld1q_u8_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_u16_x4() {
let a: [u16; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [u16x8; 4] = [u16x8::new(1, 2, 3, 4, 5, 6, 7, 8), u16x8::new(9, 10, 11, 12, 13, 14, 15, 16), u16x8::new(17, 18, 19, 20, 21, 22, 23, 24), u16x8::new(25, 26, 27, 28, 29, 30, 31, 32)];
let r: [u16x8; 4] = transmute(vld1q_u16_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_u32_x4() {
let a: [u32; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [u32x4; 4] = [u32x4::new(1, 2, 3, 4), u32x4::new(5, 6, 7, 8), u32x4::new(9, 10, 11, 12), u32x4::new(13, 14, 15, 16)];
let r: [u32x4; 4] = transmute(vld1q_u32_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_u64_x4() {
let a: [u64; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [u64x2; 4] = [u64x2::new(1, 2), u64x2::new(3, 4), u64x2::new(5, 6), u64x2::new(7, 8)];
let r: [u64x2; 4] = transmute(vld1q_u64_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_p8_x2() {
let a: [u8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [i8x8; 2] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16)];
let r: [i8x8; 2] = transmute(vld1_p8_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_p8_x3() {
let a: [u8; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let e: [i8x8; 3] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16), i8x8::new(17, 18, 19, 20, 21, 22, 23, 24)];
let r: [i8x8; 3] = transmute(vld1_p8_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_p8_x4() {
let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [i8x8; 4] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16), i8x8::new(17, 18, 19, 20, 21, 22, 23, 24), i8x8::new(25, 26, 27, 28, 29, 30, 31, 32)];
let r: [i8x8; 4] = transmute(vld1_p8_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_p8_x2() {
let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [i8x16; 2] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)];
let r: [i8x16; 2] = transmute(vld1q_p8_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_p8_x3() {
let a: [u8; 49] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [i8x16; 3] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)];
let r: [i8x16; 3] = transmute(vld1q_p8_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_p8_x4() {
let a: [u8; 65] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [i8x16; 4] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)];
let r: [i8x16; 4] = transmute(vld1q_p8_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_p16_x2() {
let a: [u16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [i16x4; 2] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8)];
let r: [i16x4; 2] = transmute(vld1_p16_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_p16_x3() {
let a: [u16; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let e: [i16x4; 3] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8), i16x4::new(9, 10, 11, 12)];
let r: [i16x4; 3] = transmute(vld1_p16_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_p16_x4() {
let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [i16x4; 4] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8), i16x4::new(9, 10, 11, 12), i16x4::new(13, 14, 15, 16)];
let r: [i16x4; 4] = transmute(vld1_p16_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_p16_x2() {
let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [i16x8; 2] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16)];
let r: [i16x8; 2] = transmute(vld1q_p16_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_p16_x3() {
let a: [u16; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let e: [i16x8; 3] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16), i16x8::new(17, 18, 19, 20, 21, 22, 23, 24)];
let r: [i16x8; 3] = transmute(vld1q_p16_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_p16_x4() {
let a: [u16; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [i16x8; 4] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16), i16x8::new(17, 18, 19, 20, 21, 22, 23, 24), i16x8::new(25, 26, 27, 28, 29, 30, 31, 32)];
let r: [i16x8; 4] = transmute(vld1q_p16_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_p64_x2() {
let a: [u64; 3] = [0, 1, 2];
let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(2)];
let r: [i64x1; 2] = transmute(vld1_p64_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_p64_x3() {
let a: [u64; 4] = [0, 1, 2, 3];
let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(2), i64x1::new(3)];
let r: [i64x1; 3] = transmute(vld1_p64_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_p64_x4() {
let a: [u64; 5] = [0, 1, 2, 3, 4];
let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(2), i64x1::new(3), i64x1::new(4)];
let r: [i64x1; 4] = transmute(vld1_p64_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_p64_x2() {
let a: [u64; 5] = [0, 1, 2, 3, 4];
let e: [i64x2; 2] = [i64x2::new(1, 2), i64x2::new(3, 4)];
let r: [i64x2; 2] = transmute(vld1q_p64_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_p64_x3() {
let a: [u64; 7] = [0, 1, 2, 3, 4, 5, 6];
let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(3, 4), i64x2::new(5, 6)];
let r: [i64x2; 3] = transmute(vld1q_p64_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_p64_x4() {
let a: [u64; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(3, 4), i64x2::new(5, 6), i64x2::new(7, 8)];
let r: [i64x2; 4] = transmute(vld1q_p64_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_f32_x2() {
let a: [f32; 5] = [0., 1., 2., 3., 4.];
let e: [f32x2; 2] = [f32x2::new(1., 2.), f32x2::new(3., 4.)];
let r: [f32x2; 2] = transmute(vld1_f32_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_f32_x2() {
let a: [f32; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.];
let e: [f32x4; 2] = [f32x4::new(1., 2., 3., 4.), f32x4::new(5., 6., 7., 8.)];
let r: [f32x4; 2] = transmute(vld1q_f32_x2(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_f32_x3() {
let a: [f32; 7] = [0., 1., 2., 3., 4., 5., 6.];
let e: [f32x2; 3] = [f32x2::new(1., 2.), f32x2::new(3., 4.), f32x2::new(5., 6.)];
let r: [f32x2; 3] = transmute(vld1_f32_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_f32_x3() {
let a: [f32; 13] = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.];
let e: [f32x4; 3] = [f32x4::new(1., 2., 3., 4.), f32x4::new(5., 6., 7., 8.), f32x4::new(9., 10., 11., 12.)];
let r: [f32x4; 3] = transmute(vld1q_f32_x3(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1_f32_x4() {
let a: [f32; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.];
let e: [f32x2; 4] = [f32x2::new(1., 2.), f32x2::new(3., 4.), f32x2::new(5., 6.), f32x2::new(7., 8.)];
let r: [f32x2; 4] = transmute(vld1_f32_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld1q_f32_x4() {
let a: [f32; 17] = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.];
let e: [f32x4; 4] = [f32x4::new(1., 2., 3., 4.), f32x4::new(5., 6., 7., 8.), f32x4::new(9., 10., 11., 12.), f32x4::new(13., 14., 15., 16.)];
let r: [f32x4; 4] = transmute(vld1q_f32_x4(a[1..].as_ptr()));
assert_eq!(r, e);
}
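// `vld2` de-interleaves 2-element structures: even-indexed source elements fill
// the first result vector, odd-indexed elements the second.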
#[simd_test(enable = "neon")]
unsafe fn test_vld2_s8() {
let a: [i8; 17] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9];
let e: [i8x8; 2] = [i8x8::new(1, 2, 2, 3, 2, 3, 4, 5), i8x8::new(2, 3, 4, 5, 6, 7, 8, 9)];
let r: [i8x8; 2] = transmute(vld2_s8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_s16() {
let a: [i16; 9] = [0, 1, 2, 2, 3, 2, 4, 3, 5];
let e: [i16x4; 2] = [i16x4::new(1, 2, 2, 3), i16x4::new(2, 3, 4, 5)];
let r: [i16x4; 2] = transmute(vld2_s16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_s32() {
let a: [i32; 5] = [0, 1, 2, 2, 3];
let e: [i32x2; 2] = [i32x2::new(1, 2), i32x2::new(2, 3)];
let r: [i32x2; 2] = transmute(vld2_s32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_s8() {
let a: [i8; 33] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17];
let e: [i8x16; 2] = [i8x16::new(1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9), i8x16::new(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)];
let r: [i8x16; 2] = transmute(vld2q_s8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_s16() {
let a: [i16; 17] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9];
let e: [i16x8; 2] = [i16x8::new(1, 2, 2, 3, 2, 3, 4, 5), i16x8::new(2, 3, 4, 5, 6, 7, 8, 9)];
let r: [i16x8; 2] = transmute(vld2q_s16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_s32() {
let a: [i32; 9] = [0, 1, 2, 2, 3, 2, 4, 3, 5];
let e: [i32x4; 2] = [i32x4::new(1, 2, 2, 3), i32x4::new(2, 3, 4, 5)];
let r: [i32x4; 2] = transmute(vld2q_s32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_s64() {
let a: [i64; 3] = [0, 1, 2];
let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(2)];
let r: [i64x1; 2] = transmute(vld2_s64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_u8() {
let a: [u8; 17] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9];
let e: [u8x8; 2] = [u8x8::new(1, 2, 2, 3, 2, 3, 4, 5), u8x8::new(2, 3, 4, 5, 6, 7, 8, 9)];
let r: [u8x8; 2] = transmute(vld2_u8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_u16() {
let a: [u16; 9] = [0, 1, 2, 2, 3, 2, 4, 3, 5];
let e: [u16x4; 2] = [u16x4::new(1, 2, 2, 3), u16x4::new(2, 3, 4, 5)];
let r: [u16x4; 2] = transmute(vld2_u16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_u32() {
let a: [u32; 5] = [0, 1, 2, 2, 3];
let e: [u32x2; 2] = [u32x2::new(1, 2), u32x2::new(2, 3)];
let r: [u32x2; 2] = transmute(vld2_u32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_u8() {
let a: [u8; 33] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17];
let e: [u8x16; 2] = [u8x16::new(1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9), u8x16::new(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)];
let r: [u8x16; 2] = transmute(vld2q_u8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_u16() {
let a: [u16; 17] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9];
let e: [u16x8; 2] = [u16x8::new(1, 2, 2, 3, 2, 3, 4, 5), u16x8::new(2, 3, 4, 5, 6, 7, 8, 9)];
let r: [u16x8; 2] = transmute(vld2q_u16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_u32() {
let a: [u32; 9] = [0, 1, 2, 2, 3, 2, 4, 3, 5];
let e: [u32x4; 2] = [u32x4::new(1, 2, 2, 3), u32x4::new(2, 3, 4, 5)];
let r: [u32x4; 2] = transmute(vld2q_u32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_p8() {
let a: [u8; 17] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9];
let e: [i8x8; 2] = [i8x8::new(1, 2, 2, 3, 2, 3, 4, 5), i8x8::new(2, 3, 4, 5, 6, 7, 8, 9)];
let r: [i8x8; 2] = transmute(vld2_p8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_p16() {
let a: [u16; 9] = [0, 1, 2, 2, 3, 2, 4, 3, 5];
let e: [i16x4; 2] = [i16x4::new(1, 2, 2, 3), i16x4::new(2, 3, 4, 5)];
let r: [i16x4; 2] = transmute(vld2_p16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_p8() {
let a: [u8; 33] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17];
let e: [i8x16; 2] = [i8x16::new(1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9), i8x16::new(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)];
let r: [i8x16; 2] = transmute(vld2q_p8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_p16() {
let a: [u16; 17] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9];
let e: [i16x8; 2] = [i16x8::new(1, 2, 2, 3, 2, 3, 4, 5), i16x8::new(2, 3, 4, 5, 6, 7, 8, 9)];
let r: [i16x8; 2] = transmute(vld2q_p16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_u64() {
let a: [u64; 3] = [0, 1, 2];
let e: [u64x1; 2] = [u64x1::new(1), u64x1::new(2)];
let r: [u64x1; 2] = transmute(vld2_u64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_p64() {
let a: [u64; 3] = [0, 1, 2];
let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(2)];
let r: [i64x1; 2] = transmute(vld2_p64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_f32() {
let a: [f32; 5] = [0., 1., 2., 2., 3.];
let e: [f32x2; 2] = [f32x2::new(1., 2.), f32x2::new(2., 3.)];
let r: [f32x2; 2] = transmute(vld2_f32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_f32() {
let a: [f32; 9] = [0., 1., 2., 2., 3., 2., 4., 3., 5.];
let e: [f32x4; 2] = [f32x4::new(1., 2., 2., 3.), f32x4::new(2., 3., 4., 5.)];
let r: [f32x4; 2] = transmute(vld2q_f32(a[1..].as_ptr()));
assert_eq!(r, e);
}
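// `vld2_dup` loads a single 2-element structure and broadcasts each member
// across all lanes of the corresponding result vector; here both loaded values are 1.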
#[simd_test(enable = "neon")]
unsafe fn test_vld2_dup_s8() {
let a: [i8; 17] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9];
let e: [i8x8; 2] = [i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i8x8; 2] = transmute(vld2_dup_s8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_dup_s16() {
let a: [i16; 9] = [0, 1, 1, 2, 3, 1, 4, 3, 5];
let e: [i16x4; 2] = [i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1)];
let r: [i16x4; 2] = transmute(vld2_dup_s16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_dup_s32() {
let a: [i32; 5] = [0, 1, 1, 2, 3];
let e: [i32x2; 2] = [i32x2::new(1, 1), i32x2::new(1, 1)];
let r: [i32x2; 2] = transmute(vld2_dup_s32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_dup_s8() {
let a: [i8; 33] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17];
let e: [i8x16; 2] = [i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i8x16; 2] = transmute(vld2q_dup_s8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_dup_s16() {
let a: [i16; 17] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9];
let e: [i16x8; 2] = [i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i16x8; 2] = transmute(vld2q_dup_s16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_dup_s32() {
let a: [i32; 9] = [0, 1, 1, 2, 3, 1, 4, 3, 5];
let e: [i32x4; 2] = [i32x4::new(1, 1, 1, 1), i32x4::new(1, 1, 1, 1)];
let r: [i32x4; 2] = transmute(vld2q_dup_s32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_dup_s64() {
let a: [i64; 3] = [0, 1, 1];
let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(1)];
let r: [i64x1; 2] = transmute(vld2_dup_s64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_dup_u8() {
let a: [u8; 17] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9];
let e: [u8x8; 2] = [u8x8::new(1, 1, 1, 1, 1, 1, 1, 1), u8x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [u8x8; 2] = transmute(vld2_dup_u8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_dup_u16() {
let a: [u16; 9] = [0, 1, 1, 2, 3, 1, 4, 3, 5];
let e: [u16x4; 2] = [u16x4::new(1, 1, 1, 1), u16x4::new(1, 1, 1, 1)];
let r: [u16x4; 2] = transmute(vld2_dup_u16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_dup_u32() {
let a: [u32; 5] = [0, 1, 1, 2, 3];
let e: [u32x2; 2] = [u32x2::new(1, 1), u32x2::new(1, 1)];
let r: [u32x2; 2] = transmute(vld2_dup_u32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_dup_u8() {
let a: [u8; 33] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17];
let e: [u8x16; 2] = [u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)];
let r: [u8x16; 2] = transmute(vld2q_dup_u8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_dup_u16() {
let a: [u16; 17] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9];
let e: [u16x8; 2] = [u16x8::new(1, 1, 1, 1, 1, 1, 1, 1), u16x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [u16x8; 2] = transmute(vld2q_dup_u16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_dup_u32() {
let a: [u32; 9] = [0, 1, 1, 2, 3, 1, 4, 3, 5];
let e: [u32x4; 2] = [u32x4::new(1, 1, 1, 1), u32x4::new(1, 1, 1, 1)];
let r: [u32x4; 2] = transmute(vld2q_dup_u32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_dup_p8() {
let a: [u8; 17] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9];
let e: [i8x8; 2] = [i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i8x8; 2] = transmute(vld2_dup_p8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_dup_p16() {
let a: [u16; 9] = [0, 1, 1, 2, 3, 1, 4, 3, 5];
let e: [i16x4; 2] = [i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1)];
let r: [i16x4; 2] = transmute(vld2_dup_p16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_dup_p8() {
let a: [u8; 33] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17];
let e: [i8x16; 2] = [i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i8x16; 2] = transmute(vld2q_dup_p8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_dup_p16() {
let a: [u16; 17] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9];
let e: [i16x8; 2] = [i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i16x8; 2] = transmute(vld2q_dup_p16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_dup_u64() {
let a: [u64; 3] = [0, 1, 1];
let e: [u64x1; 2] = [u64x1::new(1), u64x1::new(1)];
let r: [u64x1; 2] = transmute(vld2_dup_u64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_dup_p64() {
let a: [u64; 3] = [0, 1, 1];
let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(1)];
let r: [i64x1; 2] = transmute(vld2_dup_p64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_dup_f32() {
let a: [f32; 5] = [0., 1., 1., 2., 3.];
let e: [f32x2; 2] = [f32x2::new(1., 1.), f32x2::new(1., 1.)];
let r: [f32x2; 2] = transmute(vld2_dup_f32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_dup_f32() {
let a: [f32; 9] = [0., 1., 1., 2., 3., 1., 4., 3., 5.];
let e: [f32x4; 2] = [f32x4::new(1., 1., 1., 1.), f32x4::new(1., 1., 1., 1.)];
let r: [f32x4; 2] = transmute(vld2q_dup_f32(a[1..].as_ptr()));
assert_eq!(r, e);
}
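// `vld2_lane` loads one 2-element structure into the lane picked by the const
// generic (lane 0 here), leaving the remaining lanes of `b` unchanged.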
#[simd_test(enable = "neon")]
unsafe fn test_vld2_lane_s8() {
let a: [i8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i8x8; 2] = [i8x8::new(0, 2, 2, 14, 2, 16, 17, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let e: [i8x8; 2] = [i8x8::new(1, 2, 2, 14, 2, 16, 17, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let r: [i8x8; 2] = transmute(vld2_lane_s8::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_lane_s16() {
let a: [i16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i16x4; 2] = [i16x4::new(0, 2, 2, 14), i16x4::new(2, 16, 17, 18)];
let e: [i16x4; 2] = [i16x4::new(1, 2, 2, 14), i16x4::new(2, 16, 17, 18)];
let r: [i16x4; 2] = transmute(vld2_lane_s16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_lane_s32() {
let a: [i32; 5] = [0, 1, 2, 3, 4];
let b: [i32x2; 2] = [i32x2::new(0, 2), i32x2::new(2, 14)];
let e: [i32x2; 2] = [i32x2::new(1, 2), i32x2::new(2, 14)];
let r: [i32x2; 2] = transmute(vld2_lane_s32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_lane_s16() {
let a: [i16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i16x8; 2] = [i16x8::new(0, 2, 2, 14, 2, 16, 17, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let e: [i16x8; 2] = [i16x8::new(1, 2, 2, 14, 2, 16, 17, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let r: [i16x8; 2] = transmute(vld2q_lane_s16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_lane_s32() {
let a: [i32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i32x4; 2] = [i32x4::new(0, 2, 2, 14), i32x4::new(2, 16, 17, 18)];
let e: [i32x4; 2] = [i32x4::new(1, 2, 2, 14), i32x4::new(2, 16, 17, 18)];
let r: [i32x4; 2] = transmute(vld2q_lane_s32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_lane_u8() {
let a: [u8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [u8x8; 2] = [u8x8::new(0, 2, 2, 14, 2, 16, 17, 18), u8x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let e: [u8x8; 2] = [u8x8::new(1, 2, 2, 14, 2, 16, 17, 18), u8x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let r: [u8x8; 2] = transmute(vld2_lane_u8::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_lane_u16() {
let a: [u16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [u16x4; 2] = [u16x4::new(0, 2, 2, 14), u16x4::new(2, 16, 17, 18)];
let e: [u16x4; 2] = [u16x4::new(1, 2, 2, 14), u16x4::new(2, 16, 17, 18)];
let r: [u16x4; 2] = transmute(vld2_lane_u16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_lane_u32() {
let a: [u32; 5] = [0, 1, 2, 3, 4];
let b: [u32x2; 2] = [u32x2::new(0, 2), u32x2::new(2, 14)];
let e: [u32x2; 2] = [u32x2::new(1, 2), u32x2::new(2, 14)];
let r: [u32x2; 2] = transmute(vld2_lane_u32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_lane_u16() {
let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [u16x8; 2] = [u16x8::new(0, 2, 2, 14, 2, 16, 17, 18), u16x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let e: [u16x8; 2] = [u16x8::new(1, 2, 2, 14, 2, 16, 17, 18), u16x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let r: [u16x8; 2] = transmute(vld2q_lane_u16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_lane_u32() {
let a: [u32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [u32x4; 2] = [u32x4::new(0, 2, 2, 14), u32x4::new(2, 16, 17, 18)];
let e: [u32x4; 2] = [u32x4::new(1, 2, 2, 14), u32x4::new(2, 16, 17, 18)];
let r: [u32x4; 2] = transmute(vld2q_lane_u32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_lane_p8() {
let a: [u8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i8x8; 2] = [i8x8::new(0, 2, 2, 14, 2, 16, 17, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let e: [i8x8; 2] = [i8x8::new(1, 2, 2, 14, 2, 16, 17, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let r: [i8x8; 2] = transmute(vld2_lane_p8::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_lane_p16() {
let a: [u16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i16x4; 2] = [i16x4::new(0, 2, 2, 14), i16x4::new(2, 16, 17, 18)];
let e: [i16x4; 2] = [i16x4::new(1, 2, 2, 14), i16x4::new(2, 16, 17, 18)];
let r: [i16x4; 2] = transmute(vld2_lane_p16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_lane_p16() {
let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i16x8; 2] = [i16x8::new(0, 2, 2, 14, 2, 16, 17, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let e: [i16x8; 2] = [i16x8::new(1, 2, 2, 14, 2, 16, 17, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let r: [i16x8; 2] = transmute(vld2q_lane_p16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2_lane_f32() {
let a: [f32; 5] = [0., 1., 2., 3., 4.];
let b: [f32x2; 2] = [f32x2::new(0., 2.), f32x2::new(2., 14.)];
let e: [f32x2; 2] = [f32x2::new(1., 2.), f32x2::new(2., 14.)];
let r: [f32x2; 2] = transmute(vld2_lane_f32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld2q_lane_f32() {
let a: [f32; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.];
let b: [f32x4; 2] = [f32x4::new(0., 2., 2., 14.), f32x4::new(2., 16., 17., 18.)];
let e: [f32x4; 2] = [f32x4::new(1., 2., 2., 14.), f32x4::new(2., 16., 17., 18.)];
let r: [f32x4; 2] = transmute(vld2q_lane_f32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
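// vld3 loads and de-interleaves three-element structures: source element 3*i+j
// ends up in lane i of result vector j. Reads start at `a[1..]`; the leading 0
// presumably acts as a guard value so that a load starting one element too early
// is caught by the comparison.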
#[simd_test(enable = "neon")]
unsafe fn test_vld3_s8() {
let a: [i8; 25] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16];
let e: [i8x8; 3] = [i8x8::new(1, 2, 2, 4, 2, 4, 7, 8), i8x8::new(2, 4, 7, 8, 13, 14, 15, 16), i8x8::new(2, 4, 7, 8, 13, 14, 15, 16)];
let r: [i8x8; 3] = transmute(vld3_s8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_s16() {
let a: [i16; 13] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8];
let e: [i16x4; 3] = [i16x4::new(1, 2, 2, 4), i16x4::new(2, 4, 7, 8), i16x4::new(2, 4, 7, 8)];
let r: [i16x4; 3] = transmute(vld3_s16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_s32() {
let a: [i32; 7] = [0, 1, 2, 2, 2, 4, 4];
let e: [i32x2; 3] = [i32x2::new(1, 2), i32x2::new(2, 4), i32x2::new(2, 4)];
let r: [i32x2; 3] = transmute(vld3_s32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_s8() {
let a: [i8; 49] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16, 2, 25, 41, 4, 26, 42, 7, 27, 43, 8, 28, 44, 13, 29, 45, 14, 30, 46, 15, 31, 47, 16, 32, 48];
let e: [i8x16; 3] = [i8x16::new(1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16), i8x16::new(2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32), i8x16::new(2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48)];
let r: [i8x16; 3] = transmute(vld3q_s8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_s16() {
let a: [i16; 25] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16];
let e: [i16x8; 3] = [i16x8::new(1, 2, 2, 4, 2, 4, 7, 8), i16x8::new(2, 4, 7, 8, 13, 14, 15, 16), i16x8::new(2, 4, 7, 8, 13, 14, 15, 16)];
let r: [i16x8; 3] = transmute(vld3q_s16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_s32() {
let a: [i32; 13] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8];
let e: [i32x4; 3] = [i32x4::new(1, 2, 2, 4), i32x4::new(2, 4, 7, 8), i32x4::new(2, 4, 7, 8)];
let r: [i32x4; 3] = transmute(vld3q_s32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_s64() {
let a: [i64; 4] = [0, 1, 2, 2];
let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(2), i64x1::new(2)];
let r: [i64x1; 3] = transmute(vld3_s64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_u8() {
let a: [u8; 25] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16];
let e: [u8x8; 3] = [u8x8::new(1, 2, 2, 4, 2, 4, 7, 8), u8x8::new(2, 4, 7, 8, 13, 14, 15, 16), u8x8::new(2, 4, 7, 8, 13, 14, 15, 16)];
let r: [u8x8; 3] = transmute(vld3_u8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_u16() {
let a: [u16; 13] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8];
let e: [u16x4; 3] = [u16x4::new(1, 2, 2, 4), u16x4::new(2, 4, 7, 8), u16x4::new(2, 4, 7, 8)];
let r: [u16x4; 3] = transmute(vld3_u16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_u32() {
let a: [u32; 7] = [0, 1, 2, 2, 2, 4, 4];
let e: [u32x2; 3] = [u32x2::new(1, 2), u32x2::new(2, 4), u32x2::new(2, 4)];
let r: [u32x2; 3] = transmute(vld3_u32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_u8() {
let a: [u8; 49] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16, 2, 25, 41, 4, 26, 42, 7, 27, 43, 8, 28, 44, 13, 29, 45, 14, 30, 46, 15, 31, 47, 16, 32, 48];
let e: [u8x16; 3] = [u8x16::new(1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16), u8x16::new(2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32), u8x16::new(2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48)];
let r: [u8x16; 3] = transmute(vld3q_u8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_u16() {
let a: [u16; 25] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16];
let e: [u16x8; 3] = [u16x8::new(1, 2, 2, 4, 2, 4, 7, 8), u16x8::new(2, 4, 7, 8, 13, 14, 15, 16), u16x8::new(2, 4, 7, 8, 13, 14, 15, 16)];
let r: [u16x8; 3] = transmute(vld3q_u16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_u32() {
let a: [u32; 13] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8];
let e: [u32x4; 3] = [u32x4::new(1, 2, 2, 4), u32x4::new(2, 4, 7, 8), u32x4::new(2, 4, 7, 8)];
let r: [u32x4; 3] = transmute(vld3q_u32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_p8() {
let a: [u8; 25] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16];
let e: [i8x8; 3] = [i8x8::new(1, 2, 2, 4, 2, 4, 7, 8), i8x8::new(2, 4, 7, 8, 13, 14, 15, 16), i8x8::new(2, 4, 7, 8, 13, 14, 15, 16)];
let r: [i8x8; 3] = transmute(vld3_p8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_p16() {
let a: [u16; 13] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8];
let e: [i16x4; 3] = [i16x4::new(1, 2, 2, 4), i16x4::new(2, 4, 7, 8), i16x4::new(2, 4, 7, 8)];
let r: [i16x4; 3] = transmute(vld3_p16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_p8() {
let a: [u8; 49] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16, 2, 25, 41, 4, 26, 42, 7, 27, 43, 8, 28, 44, 13, 29, 45, 14, 30, 46, 15, 31, 47, 16, 32, 48];
let e: [i8x16; 3] = [i8x16::new(1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16), i8x16::new(2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32), i8x16::new(2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48)];
let r: [i8x16; 3] = transmute(vld3q_p8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_p16() {
let a: [u16; 25] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16];
let e: [i16x8; 3] = [i16x8::new(1, 2, 2, 4, 2, 4, 7, 8), i16x8::new(2, 4, 7, 8, 13, 14, 15, 16), i16x8::new(2, 4, 7, 8, 13, 14, 15, 16)];
let r: [i16x8; 3] = transmute(vld3q_p16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_u64() {
let a: [u64; 4] = [0, 1, 2, 2];
let e: [u64x1; 3] = [u64x1::new(1), u64x1::new(2), u64x1::new(2)];
let r: [u64x1; 3] = transmute(vld3_u64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_p64() {
let a: [u64; 4] = [0, 1, 2, 2];
let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(2), i64x1::new(2)];
let r: [i64x1; 3] = transmute(vld3_p64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_f32() {
let a: [f32; 7] = [0., 1., 2., 2., 2., 4., 4.];
let e: [f32x2; 3] = [f32x2::new(1., 2.), f32x2::new(2., 4.), f32x2::new(2., 4.)];
let r: [f32x2; 3] = transmute(vld3_f32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_f32() {
let a: [f32; 13] = [0., 1., 2., 2., 2., 4., 4., 2., 7., 7., 4., 8., 8.];
let e: [f32x4; 3] = [f32x4::new(1., 2., 2., 4.), f32x4::new(2., 4., 7., 8.), f32x4::new(2., 4., 7., 8.)];
let r: [f32x4; 3] = transmute(vld3q_f32(a[1..].as_ptr()));
assert_eq!(r, e);
}
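// vld3_dup reads a single three-element structure and broadcasts element j to
// every lane of result vector j, so each expected vector here is a splat of one
// of the first three source values (all 1).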
#[simd_test(enable = "neon")]
unsafe fn test_vld3_dup_s8() {
let a: [i8; 25] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13];
let e: [i8x8; 3] = [i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i8x8; 3] = transmute(vld3_dup_s8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_dup_s16() {
let a: [i16; 13] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7];
let e: [i16x4; 3] = [i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1)];
let r: [i16x4; 3] = transmute(vld3_dup_s16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_dup_s32() {
let a: [i32; 7] = [0, 1, 1, 1, 3, 1, 4];
let e: [i32x2; 3] = [i32x2::new(1, 1), i32x2::new(1, 1), i32x2::new(1, 1)];
let r: [i32x2; 3] = transmute(vld3_dup_s32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_dup_s8() {
let a: [i8; 49] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17, 6, 14, 7, 15, 8, 16, 9, 17, 6, 14, 7, 15, 8, 16, 9, 17];
let e: [i8x16; 3] = [i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i8x16; 3] = transmute(vld3q_dup_s8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_dup_s16() {
let a: [i16; 25] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13];
let e: [i16x8; 3] = [i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i16x8; 3] = transmute(vld3q_dup_s16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_dup_s32() {
let a: [i32; 13] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7];
let e: [i32x4; 3] = [i32x4::new(1, 1, 1, 1), i32x4::new(1, 1, 1, 1), i32x4::new(1, 1, 1, 1)];
let r: [i32x4; 3] = transmute(vld3q_dup_s32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_dup_s64() {
let a: [i64; 4] = [0, 1, 1, 1];
let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(1), i64x1::new(1)];
let r: [i64x1; 3] = transmute(vld3_dup_s64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_dup_u8() {
let a: [u8; 25] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13];
let e: [u8x8; 3] = [u8x8::new(1, 1, 1, 1, 1, 1, 1, 1), u8x8::new(1, 1, 1, 1, 1, 1, 1, 1), u8x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [u8x8; 3] = transmute(vld3_dup_u8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_dup_u16() {
let a: [u16; 13] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7];
let e: [u16x4; 3] = [u16x4::new(1, 1, 1, 1), u16x4::new(1, 1, 1, 1), u16x4::new(1, 1, 1, 1)];
let r: [u16x4; 3] = transmute(vld3_dup_u16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_dup_u32() {
let a: [u32; 7] = [0, 1, 1, 1, 3, 1, 4];
let e: [u32x2; 3] = [u32x2::new(1, 1), u32x2::new(1, 1), u32x2::new(1, 1)];
let r: [u32x2; 3] = transmute(vld3_dup_u32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_dup_u8() {
let a: [u8; 49] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17, 6, 14, 7, 15, 8, 16, 9, 17, 6, 14, 7, 15, 8, 16, 9, 17];
let e: [u8x16; 3] = [u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)];
let r: [u8x16; 3] = transmute(vld3q_dup_u8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_dup_u16() {
let a: [u16; 25] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13];
let e: [u16x8; 3] = [u16x8::new(1, 1, 1, 1, 1, 1, 1, 1), u16x8::new(1, 1, 1, 1, 1, 1, 1, 1), u16x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [u16x8; 3] = transmute(vld3q_dup_u16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_dup_u32() {
let a: [u32; 13] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7];
let e: [u32x4; 3] = [u32x4::new(1, 1, 1, 1), u32x4::new(1, 1, 1, 1), u32x4::new(1, 1, 1, 1)];
let r: [u32x4; 3] = transmute(vld3q_dup_u32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_dup_p8() {
let a: [u8; 25] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13];
let e: [i8x8; 3] = [i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i8x8; 3] = transmute(vld3_dup_p8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_dup_p16() {
let a: [u16; 13] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7];
let e: [i16x4; 3] = [i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1)];
let r: [i16x4; 3] = transmute(vld3_dup_p16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_dup_p8() {
let a: [u8; 49] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17, 6, 14, 7, 15, 8, 16, 9, 17, 6, 14, 7, 15, 8, 16, 9, 17];
let e: [i8x16; 3] = [i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i8x16; 3] = transmute(vld3q_dup_p8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_dup_p16() {
let a: [u16; 25] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13];
let e: [i16x8; 3] = [i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i16x8; 3] = transmute(vld3q_dup_p16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_dup_u64() {
let a: [u64; 4] = [0, 1, 1, 1];
let e: [u64x1; 3] = [u64x1::new(1), u64x1::new(1), u64x1::new(1)];
let r: [u64x1; 3] = transmute(vld3_dup_u64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_dup_p64() {
let a: [u64; 4] = [0, 1, 1, 1];
let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(1), i64x1::new(1)];
let r: [i64x1; 3] = transmute(vld3_dup_p64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_dup_f32() {
let a: [f32; 7] = [0., 1., 1., 1., 3., 1., 4.];
let e: [f32x2; 3] = [f32x2::new(1., 1.), f32x2::new(1., 1.), f32x2::new(1., 1.)];
let r: [f32x2; 3] = transmute(vld3_dup_f32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_dup_f32() {
let a: [f32; 13] = [0., 1., 1., 1., 3., 1., 4., 3., 5., 1., 4., 3., 5.];
let e: [f32x4; 3] = [f32x4::new(1., 1., 1., 1.), f32x4::new(1., 1., 1., 1.), f32x4::new(1., 1., 1., 1.)];
let r: [f32x4; 3] = transmute(vld3q_dup_f32(a[1..].as_ptr()));
assert_eq!(r, e);
}
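// vld3_lane loads one three-element structure into lane 0 (the const generic) of
// the three vectors in `b`; all remaining lanes of `e` match `b` verbatim.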
#[simd_test(enable = "neon")]
unsafe fn test_vld3_lane_s8() {
let a: [i8; 25] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i8x8; 3] = [i8x8::new(0, 2, 2, 14, 2, 16, 17, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26), i8x8::new(11, 12, 13, 14, 15, 16, 17, 18)];
let e: [i8x8; 3] = [i8x8::new(1, 2, 2, 14, 2, 16, 17, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26), i8x8::new(2, 12, 13, 14, 15, 16, 17, 18)];
let r: [i8x8; 3] = transmute(vld3_lane_s8::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_lane_s16() {
let a: [i16; 13] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4];
let b: [i16x4; 3] = [i16x4::new(0, 2, 2, 14), i16x4::new(2, 16, 17, 18), i16x4::new(2, 20, 21, 22)];
let e: [i16x4; 3] = [i16x4::new(1, 2, 2, 14), i16x4::new(2, 16, 17, 18), i16x4::new(2, 20, 21, 22)];
let r: [i16x4; 3] = transmute(vld3_lane_s16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_lane_s32() {
let a: [i32; 7] = [0, 1, 2, 2, 4, 5, 6];
let b: [i32x2; 3] = [i32x2::new(0, 2), i32x2::new(2, 14), i32x2::new(2, 16)];
let e: [i32x2; 3] = [i32x2::new(1, 2), i32x2::new(2, 14), i32x2::new(2, 16)];
let r: [i32x2; 3] = transmute(vld3_lane_s32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_lane_s16() {
let a: [i16; 25] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i16x8; 3] = [i16x8::new(0, 2, 2, 14, 2, 16, 17, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26), i16x8::new(11, 12, 13, 14, 15, 16, 17, 18)];
let e: [i16x8; 3] = [i16x8::new(1, 2, 2, 14, 2, 16, 17, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26), i16x8::new(2, 12, 13, 14, 15, 16, 17, 18)];
let r: [i16x8; 3] = transmute(vld3q_lane_s16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_lane_s32() {
let a: [i32; 13] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4];
let b: [i32x4; 3] = [i32x4::new(0, 2, 2, 14), i32x4::new(2, 16, 17, 18), i32x4::new(2, 20, 21, 22)];
let e: [i32x4; 3] = [i32x4::new(1, 2, 2, 14), i32x4::new(2, 16, 17, 18), i32x4::new(2, 20, 21, 22)];
let r: [i32x4; 3] = transmute(vld3q_lane_s32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_lane_u8() {
let a: [u8; 25] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [u8x8; 3] = [u8x8::new(0, 2, 2, 14, 2, 16, 17, 18), u8x8::new(2, 20, 21, 22, 23, 24, 25, 26), u8x8::new(11, 12, 13, 14, 15, 16, 17, 18)];
let e: [u8x8; 3] = [u8x8::new(1, 2, 2, 14, 2, 16, 17, 18), u8x8::new(2, 20, 21, 22, 23, 24, 25, 26), u8x8::new(2, 12, 13, 14, 15, 16, 17, 18)];
let r: [u8x8; 3] = transmute(vld3_lane_u8::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_lane_u16() {
let a: [u16; 13] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4];
let b: [u16x4; 3] = [u16x4::new(0, 2, 2, 14), u16x4::new(2, 16, 17, 18), u16x4::new(2, 20, 21, 22)];
let e: [u16x4; 3] = [u16x4::new(1, 2, 2, 14), u16x4::new(2, 16, 17, 18), u16x4::new(2, 20, 21, 22)];
let r: [u16x4; 3] = transmute(vld3_lane_u16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_lane_u32() {
let a: [u32; 7] = [0, 1, 2, 2, 4, 5, 6];
let b: [u32x2; 3] = [u32x2::new(0, 2), u32x2::new(2, 14), u32x2::new(2, 16)];
let e: [u32x2; 3] = [u32x2::new(1, 2), u32x2::new(2, 14), u32x2::new(2, 16)];
let r: [u32x2; 3] = transmute(vld3_lane_u32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_lane_u16() {
let a: [u16; 25] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [u16x8; 3] = [u16x8::new(0, 2, 2, 14, 2, 16, 17, 18), u16x8::new(2, 20, 21, 22, 23, 24, 25, 26), u16x8::new(11, 12, 13, 14, 15, 16, 17, 18)];
let e: [u16x8; 3] = [u16x8::new(1, 2, 2, 14, 2, 16, 17, 18), u16x8::new(2, 20, 21, 22, 23, 24, 25, 26), u16x8::new(2, 12, 13, 14, 15, 16, 17, 18)];
let r: [u16x8; 3] = transmute(vld3q_lane_u16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_lane_u32() {
let a: [u32; 13] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4];
let b: [u32x4; 3] = [u32x4::new(0, 2, 2, 14), u32x4::new(2, 16, 17, 18), u32x4::new(2, 20, 21, 22)];
let e: [u32x4; 3] = [u32x4::new(1, 2, 2, 14), u32x4::new(2, 16, 17, 18), u32x4::new(2, 20, 21, 22)];
let r: [u32x4; 3] = transmute(vld3q_lane_u32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_lane_p8() {
let a: [u8; 25] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i8x8; 3] = [i8x8::new(0, 2, 2, 14, 2, 16, 17, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26), i8x8::new(11, 12, 13, 14, 15, 16, 17, 18)];
let e: [i8x8; 3] = [i8x8::new(1, 2, 2, 14, 2, 16, 17, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26), i8x8::new(2, 12, 13, 14, 15, 16, 17, 18)];
let r: [i8x8; 3] = transmute(vld3_lane_p8::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_lane_p16() {
let a: [u16; 13] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4];
let b: [i16x4; 3] = [i16x4::new(0, 2, 2, 14), i16x4::new(2, 16, 17, 18), i16x4::new(2, 20, 21, 22)];
let e: [i16x4; 3] = [i16x4::new(1, 2, 2, 14), i16x4::new(2, 16, 17, 18), i16x4::new(2, 20, 21, 22)];
let r: [i16x4; 3] = transmute(vld3_lane_p16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_lane_p16() {
let a: [u16; 25] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i16x8; 3] = [i16x8::new(0, 2, 2, 14, 2, 16, 17, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26), i16x8::new(11, 12, 13, 14, 15, 16, 17, 18)];
let e: [i16x8; 3] = [i16x8::new(1, 2, 2, 14, 2, 16, 17, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26), i16x8::new(2, 12, 13, 14, 15, 16, 17, 18)];
let r: [i16x8; 3] = transmute(vld3q_lane_p16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3_lane_f32() {
let a: [f32; 7] = [0., 1., 2., 2., 4., 5., 6.];
let b: [f32x2; 3] = [f32x2::new(0., 2.), f32x2::new(2., 14.), f32x2::new(9., 16.)];
let e: [f32x2; 3] = [f32x2::new(1., 2.), f32x2::new(2., 14.), f32x2::new(2., 16.)];
let r: [f32x2; 3] = transmute(vld3_lane_f32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld3q_lane_f32() {
let a: [f32; 13] = [0., 1., 2., 2., 4., 5., 6., 7., 8., 5., 6., 7., 8.];
let b: [f32x4; 3] = [f32x4::new(0., 2., 2., 14.), f32x4::new(9., 16., 17., 18.), f32x4::new(5., 6., 7., 8.)];
let e: [f32x4; 3] = [f32x4::new(1., 2., 2., 14.), f32x4::new(2., 16., 17., 18.), f32x4::new(2., 6., 7., 8.)];
let r: [f32x4; 3] = transmute(vld3q_lane_f32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
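// vld4 de-interleaves four-element structures: source element 4*i+j lands in
// lane i of result vector j.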
#[simd_test(enable = "neon")]
unsafe fn test_vld4_s8() {
let a: [i8; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [i8x8; 4] = [i8x8::new(1, 2, 2, 6, 2, 6, 6, 8), i8x8::new(2, 6, 6, 8, 6, 8, 8, 16), i8x8::new(2, 6, 6, 8, 6, 8, 8, 16), i8x8::new(6, 8, 8, 16, 8, 16, 16, 32)];
let r: [i8x8; 4] = transmute(vld4_s8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_s16() {
let a: [i16; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let e: [i16x4; 4] = [i16x4::new(1, 2, 2, 6), i16x4::new(2, 6, 6, 8), i16x4::new(2, 6, 6, 8), i16x4::new(6, 8, 8, 16)];
let r: [i16x4; 4] = transmute(vld4_s16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_s32() {
let a: [i32; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
let e: [i32x2; 4] = [i32x2::new(1, 2), i32x2::new(2, 6), i32x2::new(2, 6), i32x2::new(6, 8)];
let r: [i32x2; 4] = transmute(vld4_s32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_s8() {
let a: [i8; 65] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64];
let e: [i8x16; 4] = [i8x16::new(1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16), i8x16::new(2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32), i8x16::new(2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48), i8x16::new(6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64)];
let r: [i8x16; 4] = transmute(vld4q_s8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_s16() {
let a: [i16; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [i16x8; 4] = [i16x8::new(1, 2, 2, 6, 2, 6, 6, 8), i16x8::new(2, 6, 6, 8, 6, 8, 8, 16), i16x8::new(2, 6, 6, 8, 6, 8, 8, 16), i16x8::new(6, 8, 8, 16, 8, 16, 16, 32)];
let r: [i16x8; 4] = transmute(vld4q_s16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_s32() {
let a: [i32; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let e: [i32x4; 4] = [i32x4::new(1, 2, 2, 6), i32x4::new(2, 6, 6, 8), i32x4::new(2, 6, 6, 8), i32x4::new(6, 8, 8, 16)];
let r: [i32x4; 4] = transmute(vld4q_s32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_s64() {
let a: [i64; 5] = [0, 1, 2, 2, 6];
let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(2), i64x1::new(2), i64x1::new(6)];
let r: [i64x1; 4] = transmute(vld4_s64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_u8() {
let a: [u8; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [u8x8; 4] = [u8x8::new(1, 2, 2, 6, 2, 6, 6, 8), u8x8::new(2, 6, 6, 8, 6, 8, 8, 16), u8x8::new(2, 6, 6, 8, 6, 8, 8, 16), u8x8::new(6, 8, 8, 16, 8, 16, 16, 32)];
let r: [u8x8; 4] = transmute(vld4_u8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_u16() {
let a: [u16; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let e: [u16x4; 4] = [u16x4::new(1, 2, 2, 6), u16x4::new(2, 6, 6, 8), u16x4::new(2, 6, 6, 8), u16x4::new(6, 8, 8, 16)];
let r: [u16x4; 4] = transmute(vld4_u16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_u32() {
let a: [u32; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
let e: [u32x2; 4] = [u32x2::new(1, 2), u32x2::new(2, 6), u32x2::new(2, 6), u32x2::new(6, 8)];
let r: [u32x2; 4] = transmute(vld4_u32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_u8() {
let a: [u8; 65] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64];
let e: [u8x16; 4] = [u8x16::new(1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16), u8x16::new(2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32), u8x16::new(2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48), u8x16::new(6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64)];
let r: [u8x16; 4] = transmute(vld4q_u8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_u16() {
let a: [u16; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [u16x8; 4] = [u16x8::new(1, 2, 2, 6, 2, 6, 6, 8), u16x8::new(2, 6, 6, 8, 6, 8, 8, 16), u16x8::new(2, 6, 6, 8, 6, 8, 8, 16), u16x8::new(6, 8, 8, 16, 8, 16, 16, 32)];
let r: [u16x8; 4] = transmute(vld4q_u16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_u32() {
let a: [u32; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let e: [u32x4; 4] = [u32x4::new(1, 2, 2, 6), u32x4::new(2, 6, 6, 8), u32x4::new(2, 6, 6, 8), u32x4::new(6, 8, 8, 16)];
let r: [u32x4; 4] = transmute(vld4q_u32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_p8() {
let a: [u8; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [i8x8; 4] = [i8x8::new(1, 2, 2, 6, 2, 6, 6, 8), i8x8::new(2, 6, 6, 8, 6, 8, 8, 16), i8x8::new(2, 6, 6, 8, 6, 8, 8, 16), i8x8::new(6, 8, 8, 16, 8, 16, 16, 32)];
let r: [i8x8; 4] = transmute(vld4_p8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_p16() {
let a: [u16; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let e: [i16x4; 4] = [i16x4::new(1, 2, 2, 6), i16x4::new(2, 6, 6, 8), i16x4::new(2, 6, 6, 8), i16x4::new(6, 8, 8, 16)];
let r: [i16x4; 4] = transmute(vld4_p16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_p8() {
let a: [u8; 65] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64];
let e: [i8x16; 4] = [i8x16::new(1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16), i8x16::new(2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32), i8x16::new(2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48), i8x16::new(6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64)];
let r: [i8x16; 4] = transmute(vld4q_p8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_p16() {
let a: [u16; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [i16x8; 4] = [i16x8::new(1, 2, 2, 6, 2, 6, 6, 8), i16x8::new(2, 6, 6, 8, 6, 8, 8, 16), i16x8::new(2, 6, 6, 8, 6, 8, 8, 16), i16x8::new(6, 8, 8, 16, 8, 16, 16, 32)];
let r: [i16x8; 4] = transmute(vld4q_p16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_u64() {
let a: [u64; 5] = [0, 1, 2, 2, 6];
let e: [u64x1; 4] = [u64x1::new(1), u64x1::new(2), u64x1::new(2), u64x1::new(6)];
let r: [u64x1; 4] = transmute(vld4_u64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_p64() {
let a: [u64; 5] = [0, 1, 2, 2, 6];
let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(2), i64x1::new(2), i64x1::new(6)];
let r: [i64x1; 4] = transmute(vld4_p64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_f32() {
let a: [f32; 9] = [0., 1., 2., 2., 6., 2., 6., 6., 8.];
let e: [f32x2; 4] = [f32x2::new(1., 2.), f32x2::new(2., 6.), f32x2::new(2., 6.), f32x2::new(6., 8.)];
let r: [f32x2; 4] = transmute(vld4_f32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_f32() {
let a: [f32; 17] = [0., 1., 2., 2., 6., 2., 6., 6., 8., 2., 6., 6., 8., 6., 8., 15., 16.];
let e: [f32x4; 4] = [f32x4::new(1., 2., 2., 6.), f32x4::new(2., 6., 6., 8.), f32x4::new(2., 6., 6., 15.), f32x4::new(6., 8., 8., 16.)];
let r: [f32x4; 4] = transmute(vld4q_f32(a[1..].as_ptr()));
assert_eq!(r, e);
}
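// vld4_dup reads one four-element structure and broadcasts each element across a
// whole result vector; since the first four source values are all 1, every
// expected vector is a splat of 1.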
#[simd_test(enable = "neon")]
unsafe fn test_vld4_dup_s8() {
let a: [i8; 33] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9];
let e: [i8x8; 4] = [i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i8x8; 4] = transmute(vld4_dup_s8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_dup_s16() {
let a: [i16; 17] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9];
let e: [i16x4; 4] = [i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1)];
let r: [i16x4; 4] = transmute(vld4_dup_s16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_dup_s32() {
let a: [i32; 9] = [0, 1, 1, 1, 1, 2, 4, 3, 5];
let e: [i32x2; 4] = [i32x2::new(1, 1), i32x2::new(1, 1), i32x2::new(1, 1), i32x2::new(1, 1)];
let r: [i32x2; 4] = transmute(vld4_dup_s32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_dup_s8() {
let a: [i8; 65] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9];
let e: [i8x16; 4] = [i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i8x16; 4] = transmute(vld4q_dup_s8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_dup_s16() {
let a: [i16; 33] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9];
let e: [i16x8; 4] = [i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i16x8; 4] = transmute(vld4q_dup_s16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_dup_s32() {
let a: [i32; 17] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9];
let e: [i32x4; 4] = [i32x4::new(1, 1, 1, 1), i32x4::new(1, 1, 1, 1), i32x4::new(1, 1, 1, 1), i32x4::new(1, 1, 1, 1)];
let r: [i32x4; 4] = transmute(vld4q_dup_s32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_dup_s64() {
let a: [i64; 5] = [0, 1, 1, 1, 1];
let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(1), i64x1::new(1), i64x1::new(1)];
let r: [i64x1; 4] = transmute(vld4_dup_s64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_dup_u8() {
let a: [u8; 33] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9];
let e: [u8x8; 4] = [u8x8::new(1, 1, 1, 1, 1, 1, 1, 1), u8x8::new(1, 1, 1, 1, 1, 1, 1, 1), u8x8::new(1, 1, 1, 1, 1, 1, 1, 1), u8x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [u8x8; 4] = transmute(vld4_dup_u8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_dup_u16() {
let a: [u16; 17] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9];
let e: [u16x4; 4] = [u16x4::new(1, 1, 1, 1), u16x4::new(1, 1, 1, 1), u16x4::new(1, 1, 1, 1), u16x4::new(1, 1, 1, 1)];
let r: [u16x4; 4] = transmute(vld4_dup_u16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_dup_u32() {
let a: [u32; 9] = [0, 1, 1, 1, 1, 2, 4, 3, 5];
let e: [u32x2; 4] = [u32x2::new(1, 1), u32x2::new(1, 1), u32x2::new(1, 1), u32x2::new(1, 1)];
let r: [u32x2; 4] = transmute(vld4_dup_u32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_dup_u8() {
let a: [u8; 65] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9];
let e: [u8x16; 4] = [u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)];
let r: [u8x16; 4] = transmute(vld4q_dup_u8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_dup_u16() {
let a: [u16; 33] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9];
let e: [u16x8; 4] = [u16x8::new(1, 1, 1, 1, 1, 1, 1, 1), u16x8::new(1, 1, 1, 1, 1, 1, 1, 1), u16x8::new(1, 1, 1, 1, 1, 1, 1, 1), u16x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [u16x8; 4] = transmute(vld4q_dup_u16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_dup_u32() {
let a: [u32; 17] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9];
let e: [u32x4; 4] = [u32x4::new(1, 1, 1, 1), u32x4::new(1, 1, 1, 1), u32x4::new(1, 1, 1, 1), u32x4::new(1, 1, 1, 1)];
let r: [u32x4; 4] = transmute(vld4q_dup_u32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_dup_p8() {
let a: [u8; 33] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9];
let e: [i8x8; 4] = [i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i8x8; 4] = transmute(vld4_dup_p8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_dup_p16() {
let a: [u16; 17] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9];
let e: [i16x4; 4] = [i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1)];
let r: [i16x4; 4] = transmute(vld4_dup_p16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_dup_p8() {
let a: [u8; 65] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9];
let e: [i8x16; 4] = [i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i8x16; 4] = transmute(vld4q_dup_p8(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_dup_p16() {
let a: [u16; 33] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9];
let e: [i16x8; 4] = [i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1)];
let r: [i16x8; 4] = transmute(vld4q_dup_p16(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_dup_u64() {
let a: [u64; 5] = [0, 1, 1, 1, 1];
let e: [u64x1; 4] = [u64x1::new(1), u64x1::new(1), u64x1::new(1), u64x1::new(1)];
let r: [u64x1; 4] = transmute(vld4_dup_u64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_dup_p64() {
let a: [u64; 5] = [0, 1, 1, 1, 1];
let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(1), i64x1::new(1), i64x1::new(1)];
let r: [i64x1; 4] = transmute(vld4_dup_p64(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_dup_f32() {
let a: [f32; 9] = [0., 1., 1., 1., 1., 6., 4., 3., 5.];
let e: [f32x2; 4] = [f32x2::new(1., 1.), f32x2::new(1., 1.), f32x2::new(1., 1.), f32x2::new(1., 1.)];
let r: [f32x2; 4] = transmute(vld4_dup_f32(a[1..].as_ptr()));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_dup_f32() {
let a: [f32; 17] = [0., 1., 1., 1., 1., 6., 4., 3., 5., 7., 4., 3., 5., 8., 4., 3., 5.];
let e: [f32x4; 4] = [f32x4::new(1., 1., 1., 1.), f32x4::new(1., 1., 1., 1.), f32x4::new(1., 1., 1., 1.), f32x4::new(1., 1., 1., 1.)];
let r: [f32x4; 4] = transmute(vld4q_dup_f32(a[1..].as_ptr()));
assert_eq!(r, e);
}
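// vld4_lane inserts four consecutive source elements into lane 0 of the four
// vectors in `b`, preserving every other lane.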
#[simd_test(enable = "neon")]
unsafe fn test_vld4_lane_s8() {
let a: [i8; 33] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i8x8; 4] = [i8x8::new(0, 2, 2, 2, 2, 16, 2, 18), i8x8::new(2, 20, 21, 22, 2, 24, 25, 26), i8x8::new(11, 12, 13, 14, 15, 16, 2, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let e: [i8x8; 4] = [i8x8::new(1, 2, 2, 2, 2, 16, 2, 18), i8x8::new(2, 20, 21, 22, 2, 24, 25, 26), i8x8::new(2, 12, 13, 14, 15, 16, 2, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let r: [i8x8; 4] = transmute(vld4_lane_s8::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_lane_s16() {
let a: [i16; 17] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i16x4; 4] = [i16x4::new(0, 2, 2, 2), i16x4::new(2, 16, 2, 18), i16x4::new(2, 20, 21, 22), i16x4::new(2, 24, 25, 26)];
let e: [i16x4; 4] = [i16x4::new(1, 2, 2, 2), i16x4::new(2, 16, 2, 18), i16x4::new(2, 20, 21, 22), i16x4::new(2, 24, 25, 26)];
let r: [i16x4; 4] = transmute(vld4_lane_s16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_lane_s32() {
let a: [i32; 9] = [0, 1, 2, 2, 2, 5, 6, 7, 8];
let b: [i32x2; 4] = [i32x2::new(0, 2), i32x2::new(2, 2), i32x2::new(2, 16), i32x2::new(2, 18)];
let e: [i32x2; 4] = [i32x2::new(1, 2), i32x2::new(2, 2), i32x2::new(2, 16), i32x2::new(2, 18)];
let r: [i32x2; 4] = transmute(vld4_lane_s32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_lane_s16() {
let a: [i16; 33] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i16x8; 4] = [i16x8::new(0, 2, 2, 2, 2, 16, 2, 18), i16x8::new(2, 20, 21, 22, 2, 24, 25, 26), i16x8::new(11, 12, 13, 14, 15, 16, 2, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let e: [i16x8; 4] = [i16x8::new(1, 2, 2, 2, 2, 16, 2, 18), i16x8::new(2, 20, 21, 22, 2, 24, 25, 26), i16x8::new(2, 12, 13, 14, 15, 16, 2, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let r: [i16x8; 4] = transmute(vld4q_lane_s16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_lane_s32() {
let a: [i32; 17] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i32x4; 4] = [i32x4::new(0, 2, 2, 2), i32x4::new(2, 16, 2, 18), i32x4::new(2, 20, 21, 22), i32x4::new(2, 24, 25, 26)];
let e: [i32x4; 4] = [i32x4::new(1, 2, 2, 2), i32x4::new(2, 16, 2, 18), i32x4::new(2, 20, 21, 22), i32x4::new(2, 24, 25, 26)];
let r: [i32x4; 4] = transmute(vld4q_lane_s32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_lane_u8() {
let a: [u8; 33] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [u8x8; 4] = [u8x8::new(0, 2, 2, 2, 2, 16, 2, 18), u8x8::new(2, 20, 21, 22, 2, 24, 25, 26), u8x8::new(11, 12, 13, 14, 15, 16, 2, 18), u8x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let e: [u8x8; 4] = [u8x8::new(1, 2, 2, 2, 2, 16, 2, 18), u8x8::new(2, 20, 21, 22, 2, 24, 25, 26), u8x8::new(2, 12, 13, 14, 15, 16, 2, 18), u8x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let r: [u8x8; 4] = transmute(vld4_lane_u8::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_lane_u16() {
let a: [u16; 17] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [u16x4; 4] = [u16x4::new(0, 2, 2, 2), u16x4::new(2, 16, 2, 18), u16x4::new(2, 20, 21, 22), u16x4::new(2, 24, 25, 26)];
let e: [u16x4; 4] = [u16x4::new(1, 2, 2, 2), u16x4::new(2, 16, 2, 18), u16x4::new(2, 20, 21, 22), u16x4::new(2, 24, 25, 26)];
let r: [u16x4; 4] = transmute(vld4_lane_u16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_lane_u32() {
let a: [u32; 9] = [0, 1, 2, 2, 2, 5, 6, 7, 8];
let b: [u32x2; 4] = [u32x2::new(0, 2), u32x2::new(2, 2), u32x2::new(2, 16), u32x2::new(2, 18)];
let e: [u32x2; 4] = [u32x2::new(1, 2), u32x2::new(2, 2), u32x2::new(2, 16), u32x2::new(2, 18)];
let r: [u32x2; 4] = transmute(vld4_lane_u32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_lane_u16() {
let a: [u16; 33] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [u16x8; 4] = [u16x8::new(0, 2, 2, 2, 2, 16, 2, 18), u16x8::new(2, 20, 21, 22, 2, 24, 25, 26), u16x8::new(11, 12, 13, 14, 15, 16, 2, 18), u16x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let e: [u16x8; 4] = [u16x8::new(1, 2, 2, 2, 2, 16, 2, 18), u16x8::new(2, 20, 21, 22, 2, 24, 25, 26), u16x8::new(2, 12, 13, 14, 15, 16, 2, 18), u16x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let r: [u16x8; 4] = transmute(vld4q_lane_u16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_lane_u32() {
let a: [u32; 17] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [u32x4; 4] = [u32x4::new(0, 2, 2, 2), u32x4::new(2, 16, 2, 18), u32x4::new(2, 20, 21, 22), u32x4::new(2, 24, 25, 26)];
let e: [u32x4; 4] = [u32x4::new(1, 2, 2, 2), u32x4::new(2, 16, 2, 18), u32x4::new(2, 20, 21, 22), u32x4::new(2, 24, 25, 26)];
let r: [u32x4; 4] = transmute(vld4q_lane_u32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_lane_p8() {
let a: [u8; 33] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i8x8; 4] = [i8x8::new(0, 2, 2, 2, 2, 16, 2, 18), i8x8::new(2, 20, 21, 22, 2, 24, 25, 26), i8x8::new(11, 12, 13, 14, 15, 16, 2, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let e: [i8x8; 4] = [i8x8::new(1, 2, 2, 2, 2, 16, 2, 18), i8x8::new(2, 20, 21, 22, 2, 24, 25, 26), i8x8::new(2, 12, 13, 14, 15, 16, 2, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let r: [i8x8; 4] = transmute(vld4_lane_p8::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_lane_p16() {
let a: [u16; 17] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i16x4; 4] = [i16x4::new(0, 2, 2, 2), i16x4::new(2, 16, 2, 18), i16x4::new(2, 20, 21, 22), i16x4::new(2, 24, 25, 26)];
let e: [i16x4; 4] = [i16x4::new(1, 2, 2, 2), i16x4::new(2, 16, 2, 18), i16x4::new(2, 20, 21, 22), i16x4::new(2, 24, 25, 26)];
let r: [i16x4; 4] = transmute(vld4_lane_p16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_lane_p16() {
let a: [u16; 33] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8];
let b: [i16x8; 4] = [i16x8::new(0, 2, 2, 2, 2, 16, 2, 18), i16x8::new(2, 20, 21, 22, 2, 24, 25, 26), i16x8::new(11, 12, 13, 14, 15, 16, 2, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let e: [i16x8; 4] = [i16x8::new(1, 2, 2, 2, 2, 16, 2, 18), i16x8::new(2, 20, 21, 22, 2, 24, 25, 26), i16x8::new(2, 12, 13, 14, 15, 16, 2, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26)];
let r: [i16x8; 4] = transmute(vld4q_lane_p16::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4_lane_f32() {
let a: [f32; 9] = [0., 1., 2., 2., 2., 5., 6., 7., 8.];
let b: [f32x2; 4] = [f32x2::new(0., 2.), f32x2::new(2., 2.), f32x2::new(2., 16.), f32x2::new(2., 18.)];
let e: [f32x2; 4] = [f32x2::new(1., 2.), f32x2::new(2., 2.), f32x2::new(2., 16.), f32x2::new(2., 18.)];
let r: [f32x2; 4] = transmute(vld4_lane_f32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vld4q_lane_f32() {
let a: [f32; 17] = [0., 1., 2., 2., 2., 5., 6., 7., 8., 5., 6., 7., 8., 1., 4., 3., 5.];
let b: [f32x4; 4] = [f32x4::new(0., 2., 2., 2.), f32x4::new(2., 16., 2., 18.), f32x4::new(5., 6., 7., 8.), f32x4::new(1., 4., 3., 5.)];
let e: [f32x4; 4] = [f32x4::new(1., 2., 2., 2.), f32x4::new(2., 16., 2., 18.), f32x4::new(2., 6., 7., 8.), f32x4::new(2., 4., 3., 5.)];
let r: [f32x4; 4] = transmute(vld4q_lane_f32::<0>(a[1..].as_ptr(), transmute(b)));
assert_eq!(r, e);
}
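// The vst1_lane tests store in the opposite direction: a vector is materialized
// from `a[1..]` with an unaligned read, its lane 0 is written through the raw
// pointer into `r`, and all other elements of `r` must remain zero.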
#[simd_test(enable = "neon")]
unsafe fn test_vst1_lane_s8() {
let a: [i8; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [i8; 8] = [1, 0, 0, 0, 0, 0, 0, 0];
let mut r: [i8; 8] = [0i8; 8];
vst1_lane_s8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_lane_s16() {
let a: [i16; 5] = [0, 1, 2, 3, 4];
let e: [i16; 4] = [1, 0, 0, 0];
let mut r: [i16; 4] = [0i16; 4];
vst1_lane_s16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_lane_s32() {
let a: [i32; 3] = [0, 1, 2];
let e: [i32; 2] = [1, 0];
let mut r: [i32; 2] = [0i32; 2];
vst1_lane_s32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_lane_s64() {
let a: [i64; 2] = [0, 1];
let e: [i64; 1] = [1];
let mut r: [i64; 1] = [0i64; 1];
vst1_lane_s64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_lane_s8() {
let a: [i8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [i8; 16] = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [i8; 16] = [0i8; 16];
vst1q_lane_s8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_lane_s16() {
let a: [i16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [i16; 8] = [1, 0, 0, 0, 0, 0, 0, 0];
let mut r: [i16; 8] = [0i16; 8];
vst1q_lane_s16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_lane_s32() {
let a: [i32; 5] = [0, 1, 2, 3, 4];
let e: [i32; 4] = [1, 0, 0, 0];
let mut r: [i32; 4] = [0i32; 4];
vst1q_lane_s32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_lane_s64() {
let a: [i64; 3] = [0, 1, 2];
let e: [i64; 2] = [1, 0];
let mut r: [i64; 2] = [0i64; 2];
vst1q_lane_s64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_lane_u8() {
let a: [u8; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [u8; 8] = [1, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u8; 8] = [0u8; 8];
vst1_lane_u8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_lane_u16() {
let a: [u16; 5] = [0, 1, 2, 3, 4];
let e: [u16; 4] = [1, 0, 0, 0];
let mut r: [u16; 4] = [0u16; 4];
vst1_lane_u16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_lane_u32() {
let a: [u32; 3] = [0, 1, 2];
let e: [u32; 2] = [1, 0];
let mut r: [u32; 2] = [0u32; 2];
vst1_lane_u32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_lane_u64() {
let a: [u64; 2] = [0, 1];
let e: [u64; 1] = [1];
let mut r: [u64; 1] = [0u64; 1];
vst1_lane_u64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_lane_u8() {
let a: [u8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [u8; 16] = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u8; 16] = [0u8; 16];
vst1q_lane_u8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_lane_u16() {
let a: [u16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [u16; 8] = [1, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u16; 8] = [0u16; 8];
vst1q_lane_u16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_lane_u32() {
let a: [u32; 5] = [0, 1, 2, 3, 4];
let e: [u32; 4] = [1, 0, 0, 0];
let mut r: [u32; 4] = [0u32; 4];
vst1q_lane_u32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_lane_u64() {
let a: [u64; 3] = [0, 1, 2];
let e: [u64; 2] = [1, 0];
let mut r: [u64; 2] = [0u64; 2];
vst1q_lane_u64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_lane_p8() {
let a: [u8; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [u8; 8] = [1, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u8; 8] = [0u8; 8];
vst1_lane_p8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_lane_p16() {
let a: [u16; 5] = [0, 1, 2, 3, 4];
let e: [u16; 4] = [1, 0, 0, 0];
let mut r: [u16; 4] = [0u16; 4];
vst1_lane_p16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_lane_p8() {
let a: [u8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [u8; 16] = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u8; 16] = [0u8; 16];
vst1q_lane_p8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_lane_p16() {
let a: [u16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [u16; 8] = [1, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u16; 8] = [0u16; 8];
vst1q_lane_p16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_lane_p64() {
let a: [u64; 2] = [0, 1];
let e: [u64; 1] = [1];
let mut r: [u64; 1] = [0u64; 1];
vst1_lane_p64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_lane_p64() {
let a: [u64; 3] = [0, 1, 2];
let e: [u64; 2] = [1, 0];
let mut r: [u64; 2] = [0u64; 2];
vst1q_lane_p64::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_lane_f32() {
let a: [f32; 3] = [0., 1., 2.];
let e: [f32; 2] = [1., 0.];
let mut r: [f32; 2] = [0f32; 2];
vst1_lane_f32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_lane_f32() {
let a: [f32; 5] = [0., 1., 2., 3., 4.];
let e: [f32; 4] = [1., 0., 0., 0.];
let mut r: [f32; 4] = [0f32; 4];
vst1q_lane_f32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
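// vst1_*_x2/_x3/_x4: store 2, 3, or 4 whole vectors to contiguous memory with
// no interleaving, so the destination should equal the source lanes in order.
// As in the lane tests above, each operand is built from the odd offset
// `a[1..]` via `read_unaligned`, which also exercises unaligned access.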
#[simd_test(enable = "neon")]
unsafe fn test_vst1_s8_x2() {
let a: [i8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [i8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut r: [i8; 16] = [0i8; 16];
vst1_s8_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_s16_x2() {
let a: [i16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [i16; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
let mut r: [i16; 8] = [0i16; 8];
vst1_s16_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_s32_x2() {
let a: [i32; 5] = [0, 1, 2, 3, 4];
let e: [i32; 4] = [1, 2, 3, 4];
let mut r: [i32; 4] = [0i32; 4];
vst1_s32_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_s64_x2() {
let a: [i64; 3] = [0, 1, 2];
let e: [i64; 2] = [1, 2];
let mut r: [i64; 2] = [0i64; 2];
vst1_s64_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_s8_x2() {
let a: [i8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [i8; 32] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let mut r: [i8; 32] = [0i8; 32];
vst1q_s8_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_s16_x2() {
let a: [i16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [i16; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut r: [i16; 16] = [0i16; 16];
vst1q_s16_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_s32_x2() {
let a: [i32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [i32; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
let mut r: [i32; 8] = [0i32; 8];
vst1q_s32_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_s64_x2() {
let a: [i64; 5] = [0, 1, 2, 3, 4];
let e: [i64; 4] = [1, 2, 3, 4];
let mut r: [i64; 4] = [0i64; 4];
vst1q_s64_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_s8_x3() {
let a: [i8; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let e: [i8; 24] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let mut r: [i8; 24] = [0i8; 24];
vst1_s8_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_s16_x3() {
let a: [i16; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let e: [i16; 12] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let mut r: [i16; 12] = [0i16; 12];
vst1_s16_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_s32_x3() {
let a: [i32; 7] = [0, 1, 2, 3, 4, 5, 6];
let e: [i32; 6] = [1, 2, 3, 4, 5, 6];
let mut r: [i32; 6] = [0i32; 6];
vst1_s32_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_s64_x3() {
let a: [i64; 4] = [0, 1, 2, 3];
let e: [i64; 3] = [1, 2, 3];
let mut r: [i64; 3] = [0i64; 3];
vst1_s64_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_s8_x3() {
let a: [i8; 49] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [i8; 48] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut r: [i8; 48] = [0i8; 48];
vst1q_s8_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_s16_x3() {
let a: [i16; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let e: [i16; 24] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let mut r: [i16; 24] = [0i16; 24];
vst1q_s16_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_s32_x3() {
let a: [i32; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let e: [i32; 12] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let mut r: [i32; 12] = [0i32; 12];
vst1q_s32_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_s64_x3() {
let a: [i64; 7] = [0, 1, 2, 3, 4, 5, 6];
let e: [i64; 6] = [1, 2, 3, 4, 5, 6];
let mut r: [i64; 6] = [0i64; 6];
vst1q_s64_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_s8_x4() {
let a: [i8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [i8; 32] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let mut r: [i8; 32] = [0i8; 32];
vst1_s8_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_s16_x4() {
let a: [i16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [i16; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut r: [i16; 16] = [0i16; 16];
vst1_s16_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_s32_x4() {
let a: [i32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [i32; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
let mut r: [i32; 8] = [0i32; 8];
vst1_s32_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_s64_x4() {
let a: [i64; 5] = [0, 1, 2, 3, 4];
let e: [i64; 4] = [1, 2, 3, 4];
let mut r: [i64; 4] = [0i64; 4];
vst1_s64_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_s8_x4() {
let a: [i8; 65] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [i8; 64] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let mut r: [i8; 64] = [0i8; 64];
vst1q_s8_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_s16_x4() {
let a: [i16; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [i16; 32] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let mut r: [i16; 32] = [0i16; 32];
vst1q_s16_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_s32_x4() {
let a: [i32; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [i32; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut r: [i32; 16] = [0i32; 16];
vst1q_s32_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_s64_x4() {
let a: [i64; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [i64; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
let mut r: [i64; 8] = [0i64; 8];
vst1q_s64_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_u8_x2() {
let a: [u8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut r: [u8; 16] = [0u8; 16];
vst1_u8_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_u16_x2() {
let a: [u16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [u16; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
let mut r: [u16; 8] = [0u16; 8];
vst1_u16_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_u32_x2() {
let a: [u32; 5] = [0, 1, 2, 3, 4];
let e: [u32; 4] = [1, 2, 3, 4];
let mut r: [u32; 4] = [0u32; 4];
vst1_u32_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_u64_x2() {
let a: [u64; 3] = [0, 1, 2];
let e: [u64; 2] = [1, 2];
let mut r: [u64; 2] = [0u64; 2];
vst1_u64_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_u8_x2() {
let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [u8; 32] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let mut r: [u8; 32] = [0u8; 32];
vst1q_u8_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_u16_x2() {
let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [u16; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut r: [u16; 16] = [0u16; 16];
vst1q_u16_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_u32_x2() {
let a: [u32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [u32; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
let mut r: [u32; 8] = [0u32; 8];
vst1q_u32_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_u64_x2() {
let a: [u64; 5] = [0, 1, 2, 3, 4];
let e: [u64; 4] = [1, 2, 3, 4];
let mut r: [u64; 4] = [0u64; 4];
vst1q_u64_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_u8_x3() {
let a: [u8; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let e: [u8; 24] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let mut r: [u8; 24] = [0u8; 24];
vst1_u8_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_u16_x3() {
let a: [u16; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let e: [u16; 12] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let mut r: [u16; 12] = [0u16; 12];
vst1_u16_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_u32_x3() {
let a: [u32; 7] = [0, 1, 2, 3, 4, 5, 6];
let e: [u32; 6] = [1, 2, 3, 4, 5, 6];
let mut r: [u32; 6] = [0u32; 6];
vst1_u32_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_u64_x3() {
let a: [u64; 4] = [0, 1, 2, 3];
let e: [u64; 3] = [1, 2, 3];
let mut r: [u64; 3] = [0u64; 3];
vst1_u64_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_u8_x3() {
let a: [u8; 49] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [u8; 48] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut r: [u8; 48] = [0u8; 48];
vst1q_u8_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_u16_x3() {
let a: [u16; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let e: [u16; 24] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let mut r: [u16; 24] = [0u16; 24];
vst1q_u16_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_u32_x3() {
let a: [u32; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let e: [u32; 12] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let mut r: [u32; 12] = [0u32; 12];
vst1q_u32_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_u64_x3() {
let a: [u64; 7] = [0, 1, 2, 3, 4, 5, 6];
let e: [u64; 6] = [1, 2, 3, 4, 5, 6];
let mut r: [u64; 6] = [0u64; 6];
vst1q_u64_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_u8_x4() {
let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [u8; 32] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let mut r: [u8; 32] = [0u8; 32];
vst1_u8_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_u16_x4() {
let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [u16; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut r: [u16; 16] = [0u16; 16];
vst1_u16_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_u32_x4() {
let a: [u32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [u32; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
let mut r: [u32; 8] = [0u32; 8];
vst1_u32_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_u64_x4() {
let a: [u64; 5] = [0, 1, 2, 3, 4];
let e: [u64; 4] = [1, 2, 3, 4];
let mut r: [u64; 4] = [0u64; 4];
vst1_u64_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_u8_x4() {
let a: [u8; 65] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [u8; 64] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let mut r: [u8; 64] = [0u8; 64];
vst1q_u8_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_u16_x4() {
let a: [u16; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [u16; 32] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let mut r: [u16; 32] = [0u16; 32];
vst1q_u16_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_u32_x4() {
let a: [u32; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [u32; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut r: [u32; 16] = [0u32; 16];
vst1q_u32_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_u64_x4() {
let a: [u64; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [u64; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
let mut r: [u64; 8] = [0u64; 8];
vst1q_u64_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_p8_x2() {
let a: [u8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut r: [u8; 16] = [0u8; 16];
vst1_p8_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_p8_x3() {
let a: [u8; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let e: [u8; 24] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let mut r: [u8; 24] = [0u8; 24];
vst1_p8_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_p8_x4() {
let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [u8; 32] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let mut r: [u8; 32] = [0u8; 32];
vst1_p8_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_p8_x2() {
let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [u8; 32] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let mut r: [u8; 32] = [0u8; 32];
vst1q_p8_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_p8_x3() {
let a: [u8; 49] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [u8; 48] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut r: [u8; 48] = [0u8; 48];
vst1q_p8_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_p8_x4() {
let a: [u8; 65] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [u8; 64] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let mut r: [u8; 64] = [0u8; 64];
vst1q_p8_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_p16_x2() {
let a: [u16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [u16; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
let mut r: [u16; 8] = [0u16; 8];
vst1_p16_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_p16_x3() {
let a: [u16; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let e: [u16; 12] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let mut r: [u16; 12] = [0u16; 12];
vst1_p16_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_p16_x4() {
let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [u16; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut r: [u16; 16] = [0u16; 16];
vst1_p16_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_p16_x2() {
let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let e: [u16; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut r: [u16; 16] = [0u16; 16];
vst1q_p16_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_p16_x3() {
let a: [u16; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let e: [u16; 24] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24];
let mut r: [u16; 24] = [0u16; 24];
vst1q_p16_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_p16_x4() {
let a: [u16; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let e: [u16; 32] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32];
let mut r: [u16; 32] = [0u16; 32];
vst1q_p16_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_p64_x2() {
let a: [u64; 3] = [0, 1, 2];
let e: [u64; 2] = [1, 2];
let mut r: [u64; 2] = [0u64; 2];
vst1_p64_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_p64_x3() {
let a: [u64; 4] = [0, 1, 2, 3];
let e: [u64; 3] = [1, 2, 3];
let mut r: [u64; 3] = [0u64; 3];
vst1_p64_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_p64_x4() {
let a: [u64; 5] = [0, 1, 2, 3, 4];
let e: [u64; 4] = [1, 2, 3, 4];
let mut r: [u64; 4] = [0u64; 4];
vst1_p64_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_p64_x2() {
let a: [u64; 5] = [0, 1, 2, 3, 4];
let e: [u64; 4] = [1, 2, 3, 4];
let mut r: [u64; 4] = [0u64; 4];
vst1q_p64_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_p64_x3() {
let a: [u64; 7] = [0, 1, 2, 3, 4, 5, 6];
let e: [u64; 6] = [1, 2, 3, 4, 5, 6];
let mut r: [u64; 6] = [0u64; 6];
vst1q_p64_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_p64_x4() {
let a: [u64; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8];
let e: [u64; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
let mut r: [u64; 8] = [0u64; 8];
vst1q_p64_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_f32_x2() {
let a: [f32; 5] = [0., 1., 2., 3., 4.];
let e: [f32; 4] = [1., 2., 3., 4.];
let mut r: [f32; 4] = [0f32; 4];
vst1_f32_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_f32_x2() {
let a: [f32; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.];
let e: [f32; 8] = [1., 2., 3., 4., 5., 6., 7., 8.];
let mut r: [f32; 8] = [0f32; 8];
vst1q_f32_x2(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_f32_x3() {
let a: [f32; 7] = [0., 1., 2., 3., 4., 5., 6.];
let e: [f32; 6] = [1., 2., 3., 4., 5., 6.];
let mut r: [f32; 6] = [0f32; 6];
vst1_f32_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_f32_x3() {
let a: [f32; 13] = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.];
let e: [f32; 12] = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.];
let mut r: [f32; 12] = [0f32; 12];
vst1q_f32_x3(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1_f32_x4() {
let a: [f32; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.];
let e: [f32; 8] = [1., 2., 3., 4., 5., 6., 7., 8.];
let mut r: [f32; 8] = [0f32; 8];
vst1_f32_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst1q_f32_x4() {
let a: [f32; 17] = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.];
let e: [f32; 16] = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.];
let mut r: [f32; 16] = [0f32; 16];
vst1q_f32_x4(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
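// vst2_*: 2-element interleaved stores. The two input vectors v0 and v1 are
// written as v0[0], v1[0], v0[1], v1[1], ..., which is why the expected
// output reorders the source lanes.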
#[simd_test(enable = "neon")]
unsafe fn test_vst2_s8() {
let a: [i8; 17] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9];
let e: [i8; 16] = [1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9];
let mut r: [i8; 16] = [0i8; 16];
vst2_s8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_s16() {
let a: [i16; 9] = [0, 1, 2, 2, 3, 2, 3, 4, 5];
let e: [i16; 8] = [1, 2, 2, 3, 2, 4, 3, 5];
let mut r: [i16; 8] = [0i16; 8];
vst2_s16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_s32() {
let a: [i32; 5] = [0, 1, 2, 2, 3];
let e: [i32; 4] = [1, 2, 2, 3];
let mut r: [i32; 4] = [0i32; 4];
vst2_s32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2q_s8() {
let a: [i8; 33] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17];
let e: [i8; 32] = [1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17];
let mut r: [i8; 32] = [0i8; 32];
vst2q_s8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2q_s16() {
let a: [i16; 17] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9];
let e: [i16; 16] = [1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9];
let mut r: [i16; 16] = [0i16; 16];
vst2q_s16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2q_s32() {
let a: [i32; 9] = [0, 1, 2, 2, 3, 2, 3, 4, 5];
let e: [i32; 8] = [1, 2, 2, 3, 2, 4, 3, 5];
let mut r: [i32; 8] = [0i32; 8];
vst2q_s32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_s64() {
let a: [i64; 3] = [0, 1, 2];
let e: [i64; 2] = [1, 2];
let mut r: [i64; 2] = [0i64; 2];
vst2_s64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_u8() {
let a: [u8; 17] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9];
let e: [u8; 16] = [1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9];
let mut r: [u8; 16] = [0u8; 16];
vst2_u8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_u16() {
let a: [u16; 9] = [0, 1, 2, 2, 3, 2, 3, 4, 5];
let e: [u16; 8] = [1, 2, 2, 3, 2, 4, 3, 5];
let mut r: [u16; 8] = [0u16; 8];
vst2_u16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_u32() {
let a: [u32; 5] = [0, 1, 2, 2, 3];
let e: [u32; 4] = [1, 2, 2, 3];
let mut r: [u32; 4] = [0u32; 4];
vst2_u32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2q_u8() {
let a: [u8; 33] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17];
let e: [u8; 32] = [1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17];
let mut r: [u8; 32] = [0u8; 32];
vst2q_u8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2q_u16() {
let a: [u16; 17] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9];
let e: [u16; 16] = [1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9];
let mut r: [u16; 16] = [0u16; 16];
vst2q_u16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2q_u32() {
let a: [u32; 9] = [0, 1, 2, 2, 3, 2, 3, 4, 5];
let e: [u32; 8] = [1, 2, 2, 3, 2, 4, 3, 5];
let mut r: [u32; 8] = [0u32; 8];
vst2q_u32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_p8() {
let a: [u8; 17] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9];
let e: [u8; 16] = [1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9];
let mut r: [u8; 16] = [0u8; 16];
vst2_p8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_p16() {
let a: [u16; 9] = [0, 1, 2, 2, 3, 2, 3, 4, 5];
let e: [u16; 8] = [1, 2, 2, 3, 2, 4, 3, 5];
let mut r: [u16; 8] = [0u16; 8];
vst2_p16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2q_p8() {
let a: [u8; 33] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17];
let e: [u8; 32] = [1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17];
let mut r: [u8; 32] = [0u8; 32];
vst2q_p8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2q_p16() {
let a: [u16; 17] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9];
let e: [u16; 16] = [1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9];
let mut r: [u16; 16] = [0u16; 16];
vst2q_p16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_u64() {
let a: [u64; 3] = [0, 1, 2];
let e: [u64; 2] = [1, 2];
let mut r: [u64; 2] = [0u64; 2];
vst2_u64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_p64() {
let a: [u64; 3] = [0, 1, 2];
let e: [u64; 2] = [1, 2];
let mut r: [u64; 2] = [0u64; 2];
vst2_p64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_f32() {
let a: [f32; 5] = [0., 1., 2., 2., 3.];
let e: [f32; 4] = [1., 2., 2., 3.];
let mut r: [f32; 4] = [0f32; 4];
vst2_f32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2q_f32() {
let a: [f32; 9] = [0., 1., 2., 2., 3., 2., 3., 4., 5.];
let e: [f32; 8] = [1., 2., 2., 3., 2., 4., 3., 5.];
let mut r: [f32; 8] = [0f32; 8];
vst2q_f32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
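// vst2_lane_*: store only the lane selected by the const generic (lane 0
// here) from each of the two vectors; the rest of the zero-initialized
// destination must remain untouched.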
#[simd_test(enable = "neon")]
unsafe fn test_vst2_lane_s8() {
let a: [i8; 17] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9];
let e: [i8; 16] = [1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [i8; 16] = [0i8; 16];
vst2_lane_s8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_lane_s16() {
let a: [i16; 9] = [0, 1, 2, 2, 3, 2, 3, 4, 5];
let e: [i16; 8] = [1, 2, 0, 0, 0, 0, 0, 0];
let mut r: [i16; 8] = [0i16; 8];
vst2_lane_s16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_lane_s32() {
let a: [i32; 5] = [0, 1, 2, 2, 3];
let e: [i32; 4] = [1, 2, 0, 0];
let mut r: [i32; 4] = [0i32; 4];
vst2_lane_s32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2q_lane_s16() {
let a: [i16; 17] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9];
let e: [i16; 16] = [1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [i16; 16] = [0i16; 16];
vst2q_lane_s16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2q_lane_s32() {
let a: [i32; 9] = [0, 1, 2, 2, 3, 2, 3, 4, 5];
let e: [i32; 8] = [1, 2, 0, 0, 0, 0, 0, 0];
let mut r: [i32; 8] = [0i32; 8];
vst2q_lane_s32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_lane_u8() {
let a: [u8; 17] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9];
let e: [u8; 16] = [1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u8; 16] = [0u8; 16];
vst2_lane_u8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_lane_u16() {
let a: [u16; 9] = [0, 1, 2, 2, 3, 2, 3, 4, 5];
let e: [u16; 8] = [1, 2, 0, 0, 0, 0, 0, 0];
let mut r: [u16; 8] = [0u16; 8];
vst2_lane_u16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_lane_u32() {
let a: [u32; 5] = [0, 1, 2, 2, 3];
let e: [u32; 4] = [1, 2, 0, 0];
let mut r: [u32; 4] = [0u32; 4];
vst2_lane_u32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2q_lane_u16() {
let a: [u16; 17] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9];
let e: [u16; 16] = [1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u16; 16] = [0u16; 16];
vst2q_lane_u16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2q_lane_u32() {
let a: [u32; 9] = [0, 1, 2, 2, 3, 2, 3, 4, 5];
let e: [u32; 8] = [1, 2, 0, 0, 0, 0, 0, 0];
let mut r: [u32; 8] = [0u32; 8];
vst2q_lane_u32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_lane_p8() {
let a: [u8; 17] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9];
let e: [u8; 16] = [1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u8; 16] = [0u8; 16];
vst2_lane_p8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_lane_p16() {
let a: [u16; 9] = [0, 1, 2, 2, 3, 2, 3, 4, 5];
let e: [u16; 8] = [1, 2, 0, 0, 0, 0, 0, 0];
let mut r: [u16; 8] = [0u16; 8];
vst2_lane_p16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2q_lane_p16() {
let a: [u16; 17] = [0, 1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9];
let e: [u16; 16] = [1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u16; 16] = [0u16; 16];
vst2q_lane_p16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2_lane_f32() {
let a: [f32; 5] = [0., 1., 2., 2., 3.];
let e: [f32; 4] = [1., 2., 0., 0.];
let mut r: [f32; 4] = [0f32; 4];
vst2_lane_f32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst2q_lane_f32() {
let a: [f32; 9] = [0., 1., 2., 2., 3., 2., 3., 4., 5.];
let e: [f32; 8] = [1., 2., 0., 0., 0., 0., 0., 0.];
let mut r: [f32; 8] = [0f32; 8];
vst2q_lane_f32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
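// vst3_*: 3-element interleaved stores, written as
// v0[0], v1[0], v2[0], v0[1], v1[1], v2[1], ...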
#[simd_test(enable = "neon")]
unsafe fn test_vst3_s8() {
let a: [i8; 25] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16];
let e: [i8; 24] = [1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16];
let mut r: [i8; 24] = [0i8; 24];
vst3_s8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_s16() {
let a: [i16; 13] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8];
let e: [i16; 12] = [1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8];
let mut r: [i16; 12] = [0i16; 12];
vst3_s16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_s32() {
let a: [i32; 7] = [0, 1, 2, 2, 4, 2, 4];
let e: [i32; 6] = [1, 2, 2, 2, 4, 4];
let mut r: [i32; 6] = [0i32; 6];
vst3_s32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3q_s8() {
let a: [i8; 49] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32, 2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48];
let e: [i8; 48] = [1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16, 2, 25, 41, 4, 26, 42, 7, 27, 43, 8, 28, 44, 13, 29, 45, 14, 30, 46, 15, 31, 47, 16, 32, 48];
let mut r: [i8; 48] = [0i8; 48];
vst3q_s8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3q_s16() {
let a: [i16; 25] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16];
let e: [i16; 24] = [1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16];
let mut r: [i16; 24] = [0i16; 24];
vst3q_s16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3q_s32() {
let a: [i32; 13] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8];
let e: [i32; 12] = [1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8];
let mut r: [i32; 12] = [0i32; 12];
vst3q_s32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_s64() {
let a: [i64; 4] = [0, 1, 2, 2];
let e: [i64; 3] = [1, 2, 2];
let mut r: [i64; 3] = [0i64; 3];
vst3_s64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_u8() {
let a: [u8; 25] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16];
let e: [u8; 24] = [1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16];
let mut r: [u8; 24] = [0u8; 24];
vst3_u8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_u16() {
let a: [u16; 13] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8];
let e: [u16; 12] = [1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8];
let mut r: [u16; 12] = [0u16; 12];
vst3_u16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_u32() {
let a: [u32; 7] = [0, 1, 2, 2, 4, 2, 4];
let e: [u32; 6] = [1, 2, 2, 2, 4, 4];
let mut r: [u32; 6] = [0u32; 6];
vst3_u32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3q_u8() {
let a: [u8; 49] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32, 2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48];
let e: [u8; 48] = [1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16, 2, 25, 41, 4, 26, 42, 7, 27, 43, 8, 28, 44, 13, 29, 45, 14, 30, 46, 15, 31, 47, 16, 32, 48];
let mut r: [u8; 48] = [0u8; 48];
vst3q_u8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3q_u16() {
let a: [u16; 25] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16];
let e: [u16; 24] = [1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16];
let mut r: [u16; 24] = [0u16; 24];
vst3q_u16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3q_u32() {
let a: [u32; 13] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8];
let e: [u32; 12] = [1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8];
let mut r: [u32; 12] = [0u32; 12];
vst3q_u32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_p8() {
let a: [u8; 25] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16];
let e: [u8; 24] = [1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16];
let mut r: [u8; 24] = [0u8; 24];
vst3_p8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_p16() {
let a: [u16; 13] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8];
let e: [u16; 12] = [1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8];
let mut r: [u16; 12] = [0u16; 12];
vst3_p16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3q_p8() {
let a: [u8; 49] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32, 2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48];
let e: [u8; 48] = [1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16, 2, 25, 41, 4, 26, 42, 7, 27, 43, 8, 28, 44, 13, 29, 45, 14, 30, 46, 15, 31, 47, 16, 32, 48];
let mut r: [u8; 48] = [0u8; 48];
vst3q_p8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3q_p16() {
let a: [u16; 25] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16];
let e: [u16; 24] = [1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16];
let mut r: [u16; 24] = [0u16; 24];
vst3q_p16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_u64() {
let a: [u64; 4] = [0, 1, 2, 2];
let e: [u64; 3] = [1, 2, 2];
let mut r: [u64; 3] = [0u64; 3];
vst3_u64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_p64() {
let a: [u64; 4] = [0, 1, 2, 2];
let e: [u64; 3] = [1, 2, 2];
let mut r: [u64; 3] = [0u64; 3];
vst3_p64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_f32() {
let a: [f32; 7] = [0., 1., 2., 2., 4., 2., 4.];
let e: [f32; 6] = [1., 2., 2., 2., 4., 4.];
let mut r: [f32; 6] = [0f32; 6];
vst3_f32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3q_f32() {
let a: [f32; 13] = [0., 1., 2., 2., 4., 2., 4., 7., 8., 2., 4., 7., 8.];
let e: [f32; 12] = [1., 2., 2., 2., 4., 4., 2., 7., 7., 4., 8., 8.];
let mut r: [f32; 12] = [0f32; 12];
vst3q_f32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
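// vst3_lane_*: store lane 0 of each of the three vectors (three values in
// total); the remainder of the destination stays zero.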
#[simd_test(enable = "neon")]
unsafe fn test_vst3_lane_s8() {
let a: [i8; 25] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16];
let e: [i8; 24] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [i8; 24] = [0i8; 24];
vst3_lane_s8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_lane_s16() {
let a: [i16; 13] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8];
let e: [i16; 12] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [i16; 12] = [0i16; 12];
vst3_lane_s16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_lane_s32() {
let a: [i32; 7] = [0, 1, 2, 2, 4, 2, 4];
let e: [i32; 6] = [1, 2, 2, 0, 0, 0];
let mut r: [i32; 6] = [0i32; 6];
vst3_lane_s32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3q_lane_s16() {
let a: [i16; 25] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16];
let e: [i16; 24] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [i16; 24] = [0i16; 24];
vst3q_lane_s16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3q_lane_s32() {
let a: [i32; 13] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8];
let e: [i32; 12] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [i32; 12] = [0i32; 12];
vst3q_lane_s32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_lane_u8() {
let a: [u8; 25] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16];
let e: [u8; 24] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u8; 24] = [0u8; 24];
vst3_lane_u8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_lane_u16() {
let a: [u16; 13] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8];
let e: [u16; 12] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u16; 12] = [0u16; 12];
vst3_lane_u16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_lane_u32() {
let a: [u32; 7] = [0, 1, 2, 2, 4, 2, 4];
let e: [u32; 6] = [1, 2, 2, 0, 0, 0];
let mut r: [u32; 6] = [0u32; 6];
vst3_lane_u32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3q_lane_u16() {
let a: [u16; 25] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16];
let e: [u16; 24] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u16; 24] = [0u16; 24];
vst3q_lane_u16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3q_lane_u32() {
let a: [u32; 13] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8];
let e: [u32; 12] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u32; 12] = [0u32; 12];
vst3q_lane_u32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_lane_p8() {
let a: [u8; 25] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16];
let e: [u8; 24] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u8; 24] = [0u8; 24];
vst3_lane_p8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_lane_p16() {
let a: [u16; 13] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8];
let e: [u16; 12] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u16; 12] = [0u16; 12];
vst3_lane_p16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3q_lane_p16() {
let a: [u16; 25] = [0, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16, 2, 4, 7, 8, 13, 14, 15, 16];
let e: [u16; 24] = [1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u16; 24] = [0u16; 24];
vst3q_lane_p16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3_lane_f32() {
let a: [f32; 7] = [0., 1., 2., 2., 3., 2., 3.];
let e: [f32; 6] = [1., 2., 2., 0., 0., 0.];
let mut r: [f32; 6] = [0f32; 6];
vst3_lane_f32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst3q_lane_f32() {
let a: [f32; 13] = [0., 1., 2., 2., 3., 2., 3., 4., 5., 2., 3., 4., 5.];
let e: [f32; 12] = [1., 2., 2., 0., 0., 0., 0., 0., 0., 0., 0., 0.];
let mut r: [f32; 12] = [0f32; 12];
vst3q_lane_f32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
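// vst4_*: 4-element interleaved stores, written as
// v0[0], v1[0], v2[0], v3[0], v0[1], v1[1], v2[1], v3[1], ...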
#[simd_test(enable = "neon")]
unsafe fn test_vst4_s8() {
let a: [i8; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [i8; 32] = [1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let mut r: [i8; 32] = [0i8; 32];
vst4_s8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_s16() {
let a: [i16; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let e: [i16; 16] = [1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let mut r: [i16; 16] = [0i16; 16];
vst4_s16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_s32() {
let a: [i32; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
let e: [i32; 8] = [1, 2, 2, 6, 2, 6, 6, 8];
let mut r: [i32; 8] = [0i32; 8];
vst4_s32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4q_s8() {
let a: [i8; 65] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64];
let e: [i8; 64] = [1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64];
let mut r: [i8; 64] = [0i8; 64];
vst4q_s8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4q_s16() {
let a: [i16; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [i16; 32] = [1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let mut r: [i16; 32] = [0i16; 32];
vst4q_s16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4q_s32() {
let a: [i32; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let e: [i32; 16] = [1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let mut r: [i32; 16] = [0i32; 16];
vst4q_s32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_s64() {
let a: [i64; 5] = [0, 1, 2, 2, 6];
let e: [i64; 4] = [1, 2, 2, 6];
let mut r: [i64; 4] = [0i64; 4];
vst4_s64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_u8() {
let a: [u8; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [u8; 32] = [1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let mut r: [u8; 32] = [0u8; 32];
vst4_u8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_u16() {
let a: [u16; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let e: [u16; 16] = [1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let mut r: [u16; 16] = [0u16; 16];
vst4_u16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_u32() {
let a: [u32; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
let e: [u32; 8] = [1, 2, 2, 6, 2, 6, 6, 8];
let mut r: [u32; 8] = [0u32; 8];
vst4_u32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4q_u8() {
let a: [u8; 65] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64];
let e: [u8; 64] = [1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64];
let mut r: [u8; 64] = [0u8; 64];
vst4q_u8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4q_u16() {
let a: [u16; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [u16; 32] = [1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let mut r: [u16; 32] = [0u16; 32];
vst4q_u16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4q_u32() {
let a: [u32; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let e: [u32; 16] = [1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let mut r: [u32; 16] = [0u32; 16];
vst4q_u32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_p8() {
let a: [u8; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [u8; 32] = [1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let mut r: [u8; 32] = [0u8; 32];
vst4_p8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_p16() {
let a: [u16; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let e: [u16; 16] = [1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let mut r: [u16; 16] = [0u16; 16];
vst4_p16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4q_p8() {
let a: [u8; 65] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64];
let e: [u8; 64] = [1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64];
let mut r: [u8; 64] = [0u8; 64];
vst4q_p8(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4q_p16() {
let a: [u16; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [u16; 32] = [1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let mut r: [u16; 32] = [0u16; 32];
vst4q_p16(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_u64() {
let a: [u64; 5] = [0, 1, 2, 2, 6];
let e: [u64; 4] = [1, 2, 2, 6];
let mut r: [u64; 4] = [0u64; 4];
vst4_u64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_p64() {
let a: [u64; 5] = [0, 1, 2, 2, 6];
let e: [u64; 4] = [1, 2, 2, 6];
let mut r: [u64; 4] = [0u64; 4];
vst4_p64(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_f32() {
let a: [f32; 9] = [0., 1., 2., 2., 6., 2., 6., 6., 8.];
let e: [f32; 8] = [1., 2., 2., 6., 2., 6., 6., 8.];
let mut r: [f32; 8] = [0f32; 8];
vst4_f32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4q_f32() {
let a: [f32; 17] = [0., 1., 2., 2., 6., 2., 6., 6., 8., 2., 6., 6., 8., 6., 8., 8., 16.];
let e: [f32; 16] = [1., 2., 2., 6., 2., 6., 6., 8., 2., 6., 6., 8., 6., 8., 8., 16.];
let mut r: [f32; 16] = [0f32; 16];
vst4q_f32(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
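// The vst4_lane::<LANE> tests store only lane LANE of each of the four
// vectors (four elements in total), so everything past the first four
// elements of r is expected to remain zero.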
#[simd_test(enable = "neon")]
unsafe fn test_vst4_lane_s8() {
let a: [i8; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [i8; 32] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [i8; 32] = [0i8; 32];
vst4_lane_s8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_lane_s16() {
let a: [i16; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let e: [i16; 16] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [i16; 16] = [0i16; 16];
vst4_lane_s16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_lane_s32() {
let a: [i32; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
let e: [i32; 8] = [1, 2, 2, 6, 0, 0, 0, 0];
let mut r: [i32; 8] = [0i32; 8];
vst4_lane_s32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4q_lane_s16() {
let a: [i16; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [i16; 32] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [i16; 32] = [0i16; 32];
vst4q_lane_s16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4q_lane_s32() {
let a: [i32; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let e: [i32; 16] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [i32; 16] = [0i32; 16];
vst4q_lane_s32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_lane_u8() {
let a: [u8; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [u8; 32] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u8; 32] = [0u8; 32];
vst4_lane_u8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_lane_u16() {
let a: [u16; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let e: [u16; 16] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u16; 16] = [0u16; 16];
vst4_lane_u16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_lane_u32() {
let a: [u32; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8];
let e: [u32; 8] = [1, 2, 2, 6, 0, 0, 0, 0];
let mut r: [u32; 8] = [0u32; 8];
vst4_lane_u32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4q_lane_u16() {
let a: [u16; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [u16; 32] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u16; 32] = [0u16; 32];
vst4q_lane_u16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4q_lane_u32() {
let a: [u32; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let e: [u32; 16] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u32; 16] = [0u32; 16];
vst4q_lane_u32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_lane_p8() {
let a: [u8; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [u8; 32] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u8; 32] = [0u8; 32];
vst4_lane_p8::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_lane_p16() {
let a: [u16; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16];
let e: [u16; 16] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u16; 16] = [0u16; 16];
vst4_lane_p16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4q_lane_p16() {
let a: [u16; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32];
let e: [u16; 32] = [1, 2, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let mut r: [u16; 32] = [0u16; 32];
vst4q_lane_p16::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4_lane_f32() {
let a: [f32; 9] = [0., 1., 2., 2., 6., 2., 6., 6., 8.];
let e: [f32; 8] = [1., 2., 2., 6., 0., 0., 0., 0.];
let mut r: [f32; 8] = [0f32; 8];
vst4_lane_f32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vst4q_lane_f32() {
let a: [f32; 17] = [0., 1., 2., 2., 6., 2., 6., 6., 8., 2., 6., 6., 8., 6., 8., 8., 16.];
let e: [f32; 16] = [1., 2., 2., 6., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.];
let mut r: [f32; 16] = [0f32; 16];
vst4q_lane_f32::<0>(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _));
assert_eq!(r, e);
}
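// vmul: lane-wise multiplication; the integer forms wrap on overflow.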
#[simd_test(enable = "neon")]
unsafe fn test_vmul_s8() {
let a: i8x8 = i8x8::new(1, 2, 1, 2, 1, 2, 1, 2);
let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: i8x8 = i8x8::new(1, 4, 3, 8, 5, 12, 7, 16);
let r: i8x8 = transmute(vmul_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_s8() {
let a: i8x16 = i8x16::new(1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2);
let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: i8x16 = i8x16::new(1, 4, 3, 8, 5, 12, 7, 16, 9, 20, 11, 24, 13, 28, 15, 32);
let r: i8x16 = transmute(vmulq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_s16() {
let a: i16x4 = i16x4::new(1, 2, 1, 2);
let b: i16x4 = i16x4::new(1, 2, 3, 4);
let e: i16x4 = i16x4::new(1, 4, 3, 8);
let r: i16x4 = transmute(vmul_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_s16() {
let a: i16x8 = i16x8::new(1, 2, 1, 2, 1, 2, 1, 2);
let b: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: i16x8 = i16x8::new(1, 4, 3, 8, 5, 12, 7, 16);
let r: i16x8 = transmute(vmulq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x2 = i32x2::new(1, 2);
let e: i32x2 = i32x2::new(1, 4);
let r: i32x2 = transmute(vmul_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_s32() {
let a: i32x4 = i32x4::new(1, 2, 1, 2);
let b: i32x4 = i32x4::new(1, 2, 3, 4);
let e: i32x4 = i32x4::new(1, 4, 3, 8);
let r: i32x4 = transmute(vmulq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_u8() {
let a: u8x8 = u8x8::new(1, 2, 1, 2, 1, 2, 1, 2);
let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u8x8 = u8x8::new(1, 4, 3, 8, 5, 12, 7, 16);
let r: u8x8 = transmute(vmul_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_u8() {
let a: u8x16 = u8x16::new(1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2);
let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: u8x16 = u8x16::new(1, 4, 3, 8, 5, 12, 7, 16, 9, 20, 11, 24, 13, 28, 15, 32);
let r: u8x16 = transmute(vmulq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_u16() {
let a: u16x4 = u16x4::new(1, 2, 1, 2);
let b: u16x4 = u16x4::new(1, 2, 3, 4);
let e: u16x4 = u16x4::new(1, 4, 3, 8);
let r: u16x4 = transmute(vmul_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_u16() {
let a: u16x8 = u16x8::new(1, 2, 1, 2, 1, 2, 1, 2);
let b: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u16x8 = u16x8::new(1, 4, 3, 8, 5, 12, 7, 16);
let r: u16x8 = transmute(vmulq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32x2 = u32x2::new(1, 2);
let e: u32x2 = u32x2::new(1, 4);
let r: u32x2 = transmute(vmul_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_u32() {
let a: u32x4 = u32x4::new(1, 2, 1, 2);
let b: u32x4 = u32x4::new(1, 2, 3, 4);
let e: u32x4 = u32x4::new(1, 4, 3, 8);
let r: u32x4 = transmute(vmulq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_p8() {
let a: i8x8 = i8x8::new(1, 3, 1, 3, 1, 3, 1, 3);
let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: i8x8 = i8x8::new(1, 6, 3, 12, 5, 10, 7, 24);
let r: i8x8 = transmute(vmul_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_p8() {
let a: i8x16 = i8x16::new(1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3);
let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: i8x16 = i8x16::new(1, 6, 3, 12, 5, 10, 7, 24, 9, 30, 11, 20, 13, 18, 15, 48);
let r: i8x16 = transmute(vmulq_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_f32() {
let a: f32x2 = f32x2::new(1.0, 2.0);
let b: f32x2 = f32x2::new(2.0, 3.0);
let e: f32x2 = f32x2::new(2.0, 6.0);
let r: f32x2 = transmute(vmul_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_f32() {
let a: f32x4 = f32x4::new(1.0, 2.0, 1.0, 2.0);
let b: f32x4 = f32x4::new(2.0, 3.0, 4.0, 5.0);
let e: f32x4 = f32x4::new(2.0, 6.0, 4.0, 10.0);
let r: f32x4 = transmute(vmulq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
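// vmul_n: multiplies every lane of a by the scalar b.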
#[simd_test(enable = "neon")]
unsafe fn test_vmul_n_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16 = 2;
let e: i16x4 = i16x4::new(2, 4, 6, 8);
let r: i16x4 = transmute(vmul_n_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_n_s16() {
let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i16 = 2;
let e: i16x8 = i16x8::new(2, 4, 6, 8, 10, 12, 14, 16);
let r: i16x8 = transmute(vmulq_n_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_n_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32 = 2;
let e: i32x2 = i32x2::new(2, 4);
let r: i32x2 = transmute(vmul_n_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_n_s32() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let b: i32 = 2;
let e: i32x4 = i32x4::new(2, 4, 6, 8);
let r: i32x4 = transmute(vmulq_n_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_n_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: u16 = 2;
let e: u16x4 = u16x4::new(2, 4, 6, 8);
let r: u16x4 = transmute(vmul_n_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_n_u16() {
let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u16 = 2;
let e: u16x8 = u16x8::new(2, 4, 6, 8, 10, 12, 14, 16);
let r: u16x8 = transmute(vmulq_n_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_n_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32 = 2;
let e: u32x2 = u32x2::new(2, 4);
let r: u32x2 = transmute(vmul_n_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_n_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: u32 = 2;
let e: u32x4 = u32x4::new(2, 4, 6, 8);
let r: u32x4 = transmute(vmulq_n_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_n_f32() {
let a: f32x2 = f32x2::new(1., 2.);
let b: f32 = 2.;
let e: f32x2 = f32x2::new(2., 4.);
let r: f32x2 = transmute(vmul_n_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_n_f32() {
let a: f32x4 = f32x4::new(1., 2., 3., 4.);
let b: f32 = 2.;
let e: f32x4 = f32x4::new(2., 4., 6., 8.);
let r: f32x4 = transmute(vmulq_n_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
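// vmul_lane::<LANE> multiplies every lane of a by b[LANE]; the laneq forms
// take a quad-register b. The unused lanes of b are zeroed here, presumably
// so that selecting the wrong lane produces an all-zero (failing) result.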
#[simd_test(enable = "neon")]
unsafe fn test_vmul_lane_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(0, 2, 0, 0);
let e: i16x4 = i16x4::new(2, 4, 6, 8);
let r: i16x4 = transmute(vmul_lane_s16::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_laneq_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
let e: i16x4 = i16x4::new(2, 4, 6, 8);
let r: i16x4 = transmute(vmul_laneq_s16::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_lane_s16() {
let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i16x4 = i16x4::new(0, 2, 0, 0);
let e: i16x8 = i16x8::new(2, 4, 6, 8, 10, 12, 14, 16);
let r: i16x8 = transmute(vmulq_lane_s16::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_laneq_s16() {
let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
let e: i16x8 = i16x8::new(2, 4, 6, 8, 10, 12, 14, 16);
let r: i16x8 = transmute(vmulq_laneq_s16::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_lane_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x2 = i32x2::new(0, 2);
let e: i32x2 = i32x2::new(2, 4);
let r: i32x2 = transmute(vmul_lane_s32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_laneq_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x4 = i32x4::new(0, 2, 0, 0);
let e: i32x2 = i32x2::new(2, 4);
let r: i32x2 = transmute(vmul_laneq_s32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_lane_s32() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let b: i32x2 = i32x2::new(0, 2);
let e: i32x4 = i32x4::new(2, 4, 6, 8);
let r: i32x4 = transmute(vmulq_lane_s32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_laneq_s32() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let b: i32x4 = i32x4::new(0, 2, 0, 0);
let e: i32x4 = i32x4::new(2, 4, 6, 8);
let r: i32x4 = transmute(vmulq_laneq_s32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_lane_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: u16x4 = u16x4::new(0, 2, 0, 0);
let e: u16x4 = u16x4::new(2, 4, 6, 8);
let r: u16x4 = transmute(vmul_lane_u16::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_laneq_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: u16x8 = u16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
let e: u16x4 = u16x4::new(2, 4, 6, 8);
let r: u16x4 = transmute(vmul_laneq_u16::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_lane_u16() {
let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u16x4 = u16x4::new(0, 2, 0, 0);
let e: u16x8 = u16x8::new(2, 4, 6, 8, 10, 12, 14, 16);
let r: u16x8 = transmute(vmulq_lane_u16::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_laneq_u16() {
let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u16x8 = u16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
let e: u16x8 = u16x8::new(2, 4, 6, 8, 10, 12, 14, 16);
let r: u16x8 = transmute(vmulq_laneq_u16::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_lane_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32x2 = u32x2::new(0, 2);
let e: u32x2 = u32x2::new(2, 4);
let r: u32x2 = transmute(vmul_lane_u32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_laneq_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32x4 = u32x4::new(0, 2, 0, 0);
let e: u32x2 = u32x2::new(2, 4);
let r: u32x2 = transmute(vmul_laneq_u32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_lane_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: u32x2 = u32x2::new(0, 2);
let e: u32x4 = u32x4::new(2, 4, 6, 8);
let r: u32x4 = transmute(vmulq_lane_u32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_laneq_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: u32x4 = u32x4::new(0, 2, 0, 0);
let e: u32x4 = u32x4::new(2, 4, 6, 8);
let r: u32x4 = transmute(vmulq_laneq_u32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_lane_f32() {
let a: f32x2 = f32x2::new(1., 2.);
let b: f32x2 = f32x2::new(2., 0.);
let e: f32x2 = f32x2::new(2., 4.);
let r: f32x2 = transmute(vmul_lane_f32::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmul_laneq_f32() {
let a: f32x2 = f32x2::new(1., 2.);
let b: f32x4 = f32x4::new(2., 0., 0., 0.);
let e: f32x2 = f32x2::new(2., 4.);
let r: f32x2 = transmute(vmul_laneq_f32::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_lane_f32() {
let a: f32x4 = f32x4::new(1., 2., 3., 4.);
let b: f32x2 = f32x2::new(2., 0.);
let e: f32x4 = f32x4::new(2., 4., 6., 8.);
let r: f32x4 = transmute(vmulq_lane_f32::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmulq_laneq_f32() {
let a: f32x4 = f32x4::new(1., 2., 3., 4.);
let b: f32x4 = f32x4::new(2., 0., 0., 0.);
let e: f32x4 = f32x4::new(2., 4., 6., 8.);
let r: f32x4 = transmute(vmulq_laneq_f32::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
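// vmull: widening multiply; each result lane is twice the width of the
// inputs, so the products cannot overflow. vmull_p8 is the polynomial
// (carry-less) variant.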
#[simd_test(enable = "neon")]
unsafe fn test_vmull_s8() {
let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i8x8 = i8x8::new(1, 2, 1, 2, 1, 2, 1, 2);
let e: i16x8 = i16x8::new(1, 4, 3, 8, 5, 12, 7, 16);
let r: i16x8 = transmute(vmull_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmull_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(1, 2, 1, 2);
let e: i32x4 = i32x4::new(1, 4, 3, 8);
let r: i32x4 = transmute(vmull_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmull_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x2 = i32x2::new(1, 2);
let e: i64x2 = i64x2::new(1, 4);
let r: i64x2 = transmute(vmull_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmull_u8() {
let a: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u8x8 = u8x8::new(1, 2, 1, 2, 1, 2, 1, 2);
let e: u16x8 = u16x8::new(1, 4, 3, 8, 5, 12, 7, 16);
let r: u16x8 = transmute(vmull_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmull_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: u16x4 = u16x4::new(1, 2, 1, 2);
let e: u32x4 = u32x4::new(1, 4, 3, 8);
let r: u32x4 = transmute(vmull_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmull_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32x2 = u32x2::new(1, 2);
let e: u64x2 = u64x2::new(1, 4);
let r: u64x2 = transmute(vmull_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmull_p8() {
let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i8x8 = i8x8::new(1, 3, 1, 3, 1, 3, 1, 3);
let e: i16x8 = i16x8::new(1, 6, 3, 12, 5, 10, 7, 24);
let r: i16x8 = transmute(vmull_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
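// vmull_n: widening multiply of a vector by a scalar.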
#[simd_test(enable = "neon")]
unsafe fn test_vmull_n_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16 = 2;
let e: i32x4 = i32x4::new(2, 4, 6, 8);
let r: i32x4 = transmute(vmull_n_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmull_n_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32 = 2;
let e: i64x2 = i64x2::new(2, 4);
let r: i64x2 = transmute(vmull_n_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmull_n_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: u16 = 2;
let e: u32x4 = u32x4::new(2, 4, 6, 8);
let r: u32x4 = transmute(vmull_n_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmull_n_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32 = 2;
let e: u64x2 = u64x2::new(2, 4);
let r: u64x2 = transmute(vmull_n_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
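// vmull_lane::<LANE>: widening multiply of a by b[LANE].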
#[simd_test(enable = "neon")]
unsafe fn test_vmull_lane_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(0, 2, 0, 0);
let e: i32x4 = i32x4::new(2, 4, 6, 8);
let r: i32x4 = transmute(vmull_lane_s16::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmull_laneq_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
let e: i32x4 = i32x4::new(2, 4, 6, 8);
let r: i32x4 = transmute(vmull_laneq_s16::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmull_lane_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x2 = i32x2::new(0, 2);
let e: i64x2 = i64x2::new(2, 4);
let r: i64x2 = transmute(vmull_lane_s32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmull_laneq_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x4 = i32x4::new(0, 2, 0, 0);
let e: i64x2 = i64x2::new(2, 4);
let r: i64x2 = transmute(vmull_laneq_s32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmull_lane_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: u16x4 = u16x4::new(0, 2, 0, 0);
let e: u32x4 = u32x4::new(2, 4, 6, 8);
let r: u32x4 = transmute(vmull_lane_u16::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmull_laneq_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: u16x8 = u16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
let e: u32x4 = u32x4::new(2, 4, 6, 8);
let r: u32x4 = transmute(vmull_laneq_u16::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmull_lane_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32x2 = u32x2::new(0, 2);
let e: u64x2 = u64x2::new(2, 4);
let r: u64x2 = transmute(vmull_lane_u32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmull_laneq_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32x4 = u32x4::new(0, 2, 0, 0);
let e: u64x2 = u64x2::new(2, 4);
let r: u64x2 = transmute(vmull_laneq_u32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
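// vfma computes a + b * c with a single rounding (fused multiply-add); the
// _n forms broadcast the scalar c across all lanes.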
#[simd_test(enable = "neon")]
unsafe fn test_vfma_f32() {
let a: f32x2 = f32x2::new(8.0, 18.0);
let b: f32x2 = f32x2::new(6.0, 4.0);
let c: f32x2 = f32x2::new(2.0, 3.0);
let e: f32x2 = f32x2::new(20.0, 30.0);
let r: f32x2 = transmute(vfma_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vfmaq_f32() {
let a: f32x4 = f32x4::new(8.0, 18.0, 12.0, 10.0);
let b: f32x4 = f32x4::new(6.0, 4.0, 7.0, 8.0);
let c: f32x4 = f32x4::new(2.0, 3.0, 4.0, 5.0);
let e: f32x4 = f32x4::new(20.0, 30.0, 40.0, 50.0);
let r: f32x4 = transmute(vfmaq_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vfma_n_f32() {
let a: f32x2 = f32x2::new(2.0, 3.0);
let b: f32x2 = f32x2::new(6.0, 4.0);
let c: f32 = 8.0;
let e: f32x2 = f32x2::new(50.0, 35.0);
let r: f32x2 = transmute(vfma_n_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vfmaq_n_f32() {
let a: f32x4 = f32x4::new(2.0, 3.0, 4.0, 5.0);
let b: f32x4 = f32x4::new(6.0, 4.0, 7.0, 8.0);
let c: f32 = 8.0;
let e: f32x4 = f32x4::new(50.0, 35.0, 60.0, 69.0);
let r: f32x4 = transmute(vfmaq_n_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
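// vfms computes a - b * c, also fused. Note the vfms tests reuse the vfma
// expected values as inputs and vice versa, so each pair round-trips.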
#[simd_test(enable = "neon")]
unsafe fn test_vfms_f32() {
let a: f32x2 = f32x2::new(20.0, 30.0);
let b: f32x2 = f32x2::new(6.0, 4.0);
let c: f32x2 = f32x2::new(2.0, 3.0);
let e: f32x2 = f32x2::new(8.0, 18.0);
let r: f32x2 = transmute(vfms_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vfmsq_f32() {
let a: f32x4 = f32x4::new(20.0, 30.0, 40.0, 50.0);
let b: f32x4 = f32x4::new(6.0, 4.0, 7.0, 8.0);
let c: f32x4 = f32x4::new(2.0, 3.0, 4.0, 5.0);
let e: f32x4 = f32x4::new(8.0, 18.0, 12.0, 10.0);
let r: f32x4 = transmute(vfmsq_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vfms_n_f32() {
let a: f32x2 = f32x2::new(50.0, 35.0);
let b: f32x2 = f32x2::new(6.0, 4.0);
let c: f32 = 8.0;
let e: f32x2 = f32x2::new(2.0, 3.0);
let r: f32x2 = transmute(vfms_n_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vfmsq_n_f32() {
let a: f32x4 = f32x4::new(50.0, 35.0, 60.0, 69.0);
let b: f32x4 = f32x4::new(6.0, 4.0, 7.0, 8.0);
let c: f32 = 8.0;
let e: f32x4 = f32x4::new(2.0, 3.0, 4.0, 5.0);
let r: f32x4 = transmute(vfmsq_n_f32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
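// vsub: lane-wise subtraction; the integer forms wrap on overflow.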
#[simd_test(enable = "neon")]
unsafe fn test_vsub_s8() {
let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i8x8 = i8x8::new(1, 2, 1, 2, 1, 2, 1, 2);
let e: i8x8 = i8x8::new(0, 0, 2, 2, 4, 4, 6, 6);
let r: i8x8 = transmute(vsub_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubq_s8() {
let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: i8x16 = i8x16::new(1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2);
let e: i8x16 = i8x16::new(0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14);
let r: i8x16 = transmute(vsubq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsub_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(1, 2, 1, 2);
let e: i16x4 = i16x4::new(0, 0, 2, 2);
let r: i16x4 = transmute(vsub_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubq_s16() {
let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i16x8 = i16x8::new(1, 2, 1, 2, 1, 2, 1, 2);
let e: i16x8 = i16x8::new(0, 0, 2, 2, 4, 4, 6, 6);
let r: i16x8 = transmute(vsubq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsub_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x2 = i32x2::new(1, 2);
let e: i32x2 = i32x2::new(0, 0);
let r: i32x2 = transmute(vsub_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubq_s32() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let b: i32x4 = i32x4::new(1, 2, 1, 2);
let e: i32x4 = i32x4::new(0, 0, 2, 2);
let r: i32x4 = transmute(vsubq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsub_u8() {
let a: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u8x8 = u8x8::new(1, 2, 1, 2, 1, 2, 1, 2);
let e: u8x8 = u8x8::new(0, 0, 2, 2, 4, 4, 6, 6);
let r: u8x8 = transmute(vsub_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubq_u8() {
let a: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: u8x16 = u8x16::new(1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2);
let e: u8x16 = u8x16::new(0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14);
let r: u8x16 = transmute(vsubq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsub_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: u16x4 = u16x4::new(1, 2, 1, 2);
let e: u16x4 = u16x4::new(0, 0, 2, 2);
let r: u16x4 = transmute(vsub_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubq_u16() {
let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u16x8 = u16x8::new(1, 2, 1, 2, 1, 2, 1, 2);
let e: u16x8 = u16x8::new(0, 0, 2, 2, 4, 4, 6, 6);
let r: u16x8 = transmute(vsubq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsub_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32x2 = u32x2::new(1, 2);
let e: u32x2 = u32x2::new(0, 0);
let r: u32x2 = transmute(vsub_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubq_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: u32x4 = u32x4::new(1, 2, 1, 2);
let e: u32x4 = u32x4::new(0, 0, 2, 2);
let r: u32x4 = transmute(vsubq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsub_s64() {
let a: i64x1 = i64x1::new(1);
let b: i64x1 = i64x1::new(1);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vsub_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubq_s64() {
let a: i64x2 = i64x2::new(1, 2);
let b: i64x2 = i64x2::new(1, 2);
let e: i64x2 = i64x2::new(0, 0);
let r: i64x2 = transmute(vsubq_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsub_u64() {
let a: u64x1 = u64x1::new(1);
let b: u64x1 = u64x1::new(1);
let e: u64x1 = u64x1::new(0);
let r: u64x1 = transmute(vsub_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubq_u64() {
let a: u64x2 = u64x2::new(1, 2);
let b: u64x2 = u64x2::new(1, 2);
let e: u64x2 = u64x2::new(0, 0);
let r: u64x2 = transmute(vsubq_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsub_f32() {
let a: f32x2 = f32x2::new(1.0, 4.0);
let b: f32x2 = f32x2::new(1.0, 2.0);
let e: f32x2 = f32x2::new(0.0, 2.0);
let r: f32x2 = transmute(vsub_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubq_f32() {
let a: f32x4 = f32x4::new(1.0, 4.0, 3.0, 8.0);
let b: f32x4 = f32x4::new(1.0, 2.0, 3.0, 4.0);
let e: f32x4 = f32x4::new(0.0, 2.0, 0.0, 4.0);
let r: f32x4 = transmute(vsubq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
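// vadd_p / vaddq_p: polynomial addition over GF(2), i.e. bitwise XOR, which
// is why 1 + 1 = 0 in the expected vectors.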
#[simd_test(enable = "neon")]
unsafe fn test_vadd_p8() {
let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i8x8 = i8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let e: i8x8 = i8x8::new(0, 3, 2, 5, 4, 7, 6, 9);
let r: i8x8 = transmute(vadd_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vadd_p16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(1, 1, 1, 1);
let e: i16x4 = i16x4::new(0, 3, 2, 5);
let r: i16x4 = transmute(vadd_p16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vaddq_p8() {
let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: i8x16 = i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
let e: i8x16 = i8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14, 17);
let r: i8x16 = transmute(vaddq_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vaddq_p16() {
let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let e: i16x8 = i16x8::new(0, 3, 2, 5, 4, 7, 6, 9);
let r: i16x8 = transmute(vaddq_p16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vadd_p64() {
let a: i64x1 = i64x1::new(1);
let b: i64x1 = i64x1::new(1);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vadd_p64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vaddq_p64() {
let a: i64x2 = i64x2::new(1, 2);
let b: i64x2 = i64x2::new(1, 1);
let e: i64x2 = i64x2::new(0, 3);
let r: i64x2 = transmute(vaddq_p64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vaddq_p128() {
let a: p128 = 16;
let b: p128 = 1;
let e: p128 = 17;
let r: p128 = transmute(vaddq_p128(transmute(a), transmute(b)));
assert_eq!(r, e);
}
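// vsubhn: subtract and keep the high half of each lane, narrowing the
// result: (a - b) >> (bits / 2). E.g. (0x7F_FF - 1) >> 8 == 0x7F and
// (-32768 - 0) >> 8 == -128.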
#[simd_test(enable = "neon")]
unsafe fn test_vsubhn_s16() {
let a: i16x8 = i16x8::new(0x7F_FF, -32768, 1, 1, 0x7F_FF, -32768, 1, 1);
let b: i16x8 = i16x8::new(1, 0, 0, 0, 1, 0, 0, 0);
let e: i8x8 = i8x8::new(0x7F, -128, 0, 0, 0x7F, -128, 0, 0);
let r: i8x8 = transmute(vsubhn_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubhn_s32() {
let a: i32x4 = i32x4::new(0x7F_FF_FF_FF, -2147483648, 1, 1);
let b: i32x4 = i32x4::new(1, 0, 0, 0);
let e: i16x4 = i16x4::new(0x7F_FF, -32768, 0, 0);
let r: i16x4 = transmute(vsubhn_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubhn_s64() {
let a: i64x2 = i64x2::new(0x7F_FF_FF_FF_FF_FF_FF_FF, -9223372036854775808);
let b: i64x2 = i64x2::new(1, 0);
let e: i32x2 = i32x2::new(0x7F_FF_FF_FF, -2147483648);
let r: i32x2 = transmute(vsubhn_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubhn_u16() {
let a: u16x8 = u16x8::new(0xFF_FF, 0, 1, 1, 0xFF_FF, 0, 1, 1);
let b: u16x8 = u16x8::new(1, 0, 0, 0, 1, 0, 0, 0);
let e: u8x8 = u8x8::new(0xFF, 0, 0, 0, 0xFF, 0, 0, 0);
let r: u8x8 = transmute(vsubhn_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubhn_u32() {
let a: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0, 1, 1);
let b: u32x4 = u32x4::new(1, 0, 0, 0);
let e: u16x4 = u16x4::new(0xFF_FF, 0, 0, 0);
let r: u16x4 = transmute(vsubhn_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubhn_u64() {
let a: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0);
let b: u64x2 = u64x2::new(1, 0);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0);
let r: u32x2 = transmute(vsubhn_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
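// vsubhn_high: computes vsubhn(b, c) and places the narrowed result in the
// high half of the output, while a supplies the low half.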
#[simd_test(enable = "neon")]
unsafe fn test_vsubhn_high_s16() {
let a: i8x8 = i8x8::new(0x7F, 0, 0x7F, 0, 0x7F, 0, 0x7F, 0);
let b: i16x8 = i16x8::new(0x7F_FF, 1, 0x7F_FF, 1, 0x7F_FF, 1, 0x7F_FF, 1);
let c: i16x8 = i16x8::new(1, 0, 1, 0, 1, 0, 1, 0);
let e: i8x16 = i8x16::new(0x7F, 0, 0x7F, 0, 0x7F, 0, 0x7F, 0, 0x7F, 0, 0x7F, 0, 0x7F, 0, 0x7F, 0);
let r: i8x16 = transmute(vsubhn_high_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubhn_high_s32() {
let a: i16x4 = i16x4::new(0x7F_FF, 0, 0x7F_FF, 0);
let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 1, 0x7F_FF_FF_FF, 1);
let c: i32x4 = i32x4::new(1, 0, 1, 0);
let e: i16x8 = i16x8::new(0x7F_FF, 0, 0x7F_FF, 0, 0x7F_FF, 0, 0x7F_FF, 0);
let r: i16x8 = transmute(vsubhn_high_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubhn_high_s64() {
let a: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0);
let b: i64x2 = i64x2::new(0x7F_FF_FF_FF_FF_FF_FF_FF, 1);
let c: i64x2 = i64x2::new(1, 0);
let e: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0, 0x7F_FF_FF_FF, 0);
let r: i32x4 = transmute(vsubhn_high_s64(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubhn_high_u16() {
let a: u8x8 = u8x8::new(0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0);
let b: u16x8 = u16x8::new(0xFF_FF, 1, 0xFF_FF, 1, 0xFF_FF, 1, 0xFF_FF, 1);
let c: u16x8 = u16x8::new(1, 0, 1, 0, 1, 0, 1, 0);
let e: u8x16 = u8x16::new(0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF, 0);
let r: u8x16 = transmute(vsubhn_high_u16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubhn_high_u32() {
let a: u16x4 = u16x4::new(0xFF_FF, 0, 0xFF_FF, 0);
let b: u32x4 = u32x4::new(0xFF_FF_FF_FF, 1, 0xFF_FF_FF_FF, 1);
let c: u32x4 = u32x4::new(1, 0, 1, 0);
let e: u16x8 = u16x8::new(0xFF_FF, 0, 0xFF_FF, 0, 0xFF_FF, 0, 0xFF_FF, 0);
let r: u16x8 = transmute(vsubhn_high_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubhn_high_u64() {
let a: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0);
let b: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 1);
let c: u64x2 = u64x2::new(1, 0);
let e: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0, 0xFF_FF_FF_FF, 0);
let r: u32x4 = transmute(vsubhn_high_u64(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
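// vhsub: halving subtraction, (a - b) >> 1 per lane, computed without
// intermediate overflow.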
#[simd_test(enable = "neon")]
unsafe fn test_vhsub_u8() {
let a: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u8x8 = u8x8::new(1, 2, 1, 2, 1, 2, 1, 2);
let e: u8x8 = u8x8::new(0, 0, 1, 1, 2, 2, 3, 3);
let r: u8x8 = transmute(vhsub_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhsubq_u8() {
let a: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: u8x16 = u8x16::new(1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2);
let e: u8x16 = u8x16::new(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7);
let r: u8x16 = transmute(vhsubq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhsub_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: u16x4 = u16x4::new(1, 2, 1, 2);
let e: u16x4 = u16x4::new(0, 0, 1, 1);
let r: u16x4 = transmute(vhsub_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhsubq_u16() {
let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u16x8 = u16x8::new(1, 2, 1, 2, 1, 2, 1, 2);
let e: u16x8 = u16x8::new(0, 0, 1, 1, 2, 2, 3, 3);
let r: u16x8 = transmute(vhsubq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhsub_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32x2 = u32x2::new(1, 2);
let e: u32x2 = u32x2::new(0, 0);
let r: u32x2 = transmute(vhsub_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhsubq_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: u32x4 = u32x4::new(1, 2, 1, 2);
let e: u32x4 = u32x4::new(0, 0, 1, 1);
let r: u32x4 = transmute(vhsubq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhsub_s8() {
let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i8x8 = i8x8::new(1, 2, 1, 2, 1, 2, 1, 2);
let e: i8x8 = i8x8::new(0, 0, 1, 1, 2, 2, 3, 3);
let r: i8x8 = transmute(vhsub_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhsubq_s8() {
let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: i8x16 = i8x16::new(1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2);
let e: i8x16 = i8x16::new(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7);
let r: i8x16 = transmute(vhsubq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhsub_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(1, 2, 1, 2);
let e: i16x4 = i16x4::new(0, 0, 1, 1);
let r: i16x4 = transmute(vhsub_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhsubq_s16() {
let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i16x8 = i16x8::new(1, 2, 1, 2, 1, 2, 1, 2);
let e: i16x8 = i16x8::new(0, 0, 1, 1, 2, 2, 3, 3);
let r: i16x8 = transmute(vhsubq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhsub_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x2 = i32x2::new(1, 2);
let e: i32x2 = i32x2::new(0, 0);
let r: i32x2 = transmute(vhsub_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vhsubq_s32() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let b: i32x4 = i32x4::new(1, 2, 1, 2);
let e: i32x4 = i32x4::new(0, 0, 1, 1);
let r: i32x4 = transmute(vhsubq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
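// vsubw ("subtract wide"): the narrow operand b is widened to the element
// width of a before subtracting.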
#[simd_test(enable = "neon")]
unsafe fn test_vsubw_s8() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: i16x8 = transmute(vsubw_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubw_s16() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let b: i16x4 = i16x4::new(0, 1, 2, 3);
let e: i32x4 = i32x4::new(0, 0, 0, 0);
let r: i32x4 = transmute(vsubw_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubw_s32() {
let a: i64x2 = i64x2::new(0, 1);
let b: i32x2 = i32x2::new(0, 1);
let e: i64x2 = i64x2::new(0, 0);
let r: i64x2 = transmute(vsubw_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubw_u8() {
let a: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: u16x8 = transmute(vsubw_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubw_u16() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let b: u16x4 = u16x4::new(0, 1, 2, 3);
let e: u32x4 = u32x4::new(0, 0, 0, 0);
let r: u32x4 = transmute(vsubw_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubw_u32() {
let a: u64x2 = u64x2::new(0, 1);
let b: u32x2 = u32x2::new(0, 1);
let e: u64x2 = u64x2::new(0, 0);
let r: u64x2 = transmute(vsubw_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
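// vsubl ("subtract long"): both operands are widened first, so extreme
// values such as i8::MIN subtract without overflow.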
#[simd_test(enable = "neon")]
unsafe fn test_vsubl_s8() {
let a: i8x8 = i8x8::new(0x7F, -128, 2, 3, 4, 5, 6, 7);
let b: i8x8 = i8x8::new(0x7F, -128, 2, 3, 4, 5, 6, 7);
let e: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: i16x8 = transmute(vsubl_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubl_s16() {
let a: i16x4 = i16x4::new(0x7F_FF, -32768, 2, 3);
let b: i16x4 = i16x4::new(0x7F_FF, -32768, 2, 3);
let e: i32x4 = i32x4::new(0, 0, 0, 0);
let r: i32x4 = transmute(vsubl_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubl_s32() {
let a: i32x2 = i32x2::new(0x7F_FF_FF_FF, -2147483648);
let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, -2147483648);
let e: i64x2 = i64x2::new(0, 0);
let r: i64x2 = transmute(vsubl_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubl_u8() {
let a: u8x8 = u8x8::new(0xFF, 0, 2, 3, 4, 5, 6, 7);
let b: u8x8 = u8x8::new(0xFF, 0, 2, 3, 4, 5, 6, 7);
let e: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: u16x8 = transmute(vsubl_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubl_u16() {
let a: u16x4 = u16x4::new(0xFF_FF, 0, 2, 3);
let b: u16x4 = u16x4::new(0xFF_FF, 0, 2, 3);
let e: u32x4 = u32x4::new(0, 0, 0, 0);
let r: u32x4 = transmute(vsubl_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsubl_u32() {
let a: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0);
let b: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0);
let e: u64x2 = u64x2::new(0, 0);
let r: u64x2 = transmute(vsubl_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
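// vmax: lane-wise maximum. In the q-form tests the operands cross over
// halfway, so the expected vector switches from b's lanes to a's.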
#[simd_test(enable = "neon")]
unsafe fn test_vmax_s8() {
let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i8x8 = i8x8::new(16, 15, 14, 13, 12, 11, 10, 9);
let e: i8x8 = i8x8::new(16, 15, 14, 13, 12, 11, 10, 9);
let r: i8x8 = transmute(vmax_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmaxq_s8() {
let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: i8x16 = i8x16::new(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1);
let e: i8x16 = i8x16::new(16, 15, 14, 13, 12, 11, 10, 9, 9, 10, 11, 12, 13, 14, 15, 16);
let r: i8x16 = transmute(vmaxq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmax_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(16, 15, 14, 13);
let e: i16x4 = i16x4::new(16, 15, 14, 13);
let r: i16x4 = transmute(vmax_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmaxq_s16() {
let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i16x8 = i16x8::new(16, 15, 14, 13, 12, 11, 10, 9);
let e: i16x8 = i16x8::new(16, 15, 14, 13, 12, 11, 10, 9);
let r: i16x8 = transmute(vmaxq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmax_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x2 = i32x2::new(16, 15);
let e: i32x2 = i32x2::new(16, 15);
let r: i32x2 = transmute(vmax_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmaxq_s32() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let b: i32x4 = i32x4::new(16, 15, 14, 13);
let e: i32x4 = i32x4::new(16, 15, 14, 13);
let r: i32x4 = transmute(vmaxq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmax_u8() {
let a: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u8x8 = u8x8::new(16, 15, 14, 13, 12, 11, 10, 9);
let e: u8x8 = u8x8::new(16, 15, 14, 13, 12, 11, 10, 9);
let r: u8x8 = transmute(vmax_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmaxq_u8() {
let a: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: u8x16 = u8x16::new(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1);
let e: u8x16 = u8x16::new(16, 15, 14, 13, 12, 11, 10, 9, 9, 10, 11, 12, 13, 14, 15, 16);
let r: u8x16 = transmute(vmaxq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmax_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: u16x4 = u16x4::new(16, 15, 14, 13);
let e: u16x4 = u16x4::new(16, 15, 14, 13);
let r: u16x4 = transmute(vmax_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmaxq_u16() {
let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u16x8 = u16x8::new(16, 15, 14, 13, 12, 11, 10, 9);
let e: u16x8 = u16x8::new(16, 15, 14, 13, 12, 11, 10, 9);
let r: u16x8 = transmute(vmaxq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmax_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32x2 = u32x2::new(16, 15);
let e: u32x2 = u32x2::new(16, 15);
let r: u32x2 = transmute(vmax_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmaxq_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: u32x4 = u32x4::new(16, 15, 14, 13);
let e: u32x4 = u32x4::new(16, 15, 14, 13);
let r: u32x4 = transmute(vmaxq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmax_f32() {
let a: f32x2 = f32x2::new(1.0, -2.0);
let b: f32x2 = f32x2::new(0.0, 3.0);
let e: f32x2 = f32x2::new(1.0, 3.0);
let r: f32x2 = transmute(vmax_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmaxq_f32() {
let a: f32x4 = f32x4::new(1.0, -2.0, 3.0, -4.0);
let b: f32x4 = f32x4::new(0.0, 3.0, 2.0, 8.0);
let e: f32x4 = f32x4::new(1.0, 3.0, 3.0, 8.0);
let r: f32x4 = transmute(vmaxq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
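// vmaxnm follows the IEEE 754-2008 maxNum rule (a number wins over a quiet
// NaN); these tests only exercise ordinary values.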
#[simd_test(enable = "neon")]
unsafe fn test_vmaxnm_f32() {
let a: f32x2 = f32x2::new(1.0, 2.0);
let b: f32x2 = f32x2::new(8.0, 16.0);
let e: f32x2 = f32x2::new(8.0, 16.0);
let r: f32x2 = transmute(vmaxnm_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmaxnmq_f32() {
let a: f32x4 = f32x4::new(1.0, 2.0, 3.0, -4.0);
let b: f32x4 = f32x4::new(8.0, 16.0, -1.0, 6.0);
let e: f32x4 = f32x4::new(8.0, 16.0, 3.0, 6.0);
let r: f32x4 = transmute(vmaxnmq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
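// vmin: lane-wise minimum, mirroring the vmax tests above.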
#[simd_test(enable = "neon")]
unsafe fn test_vmin_s8() {
let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i8x8 = i8x8::new(16, 15, 14, 13, 12, 11, 10, 9);
let e: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: i8x8 = transmute(vmin_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vminq_s8() {
let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: i8x16 = i8x16::new(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1);
let e: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1);
let r: i8x16 = transmute(vminq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmin_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(16, 15, 14, 13);
let e: i16x4 = i16x4::new(1, 2, 3, 4);
let r: i16x4 = transmute(vmin_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vminq_s16() {
let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i16x8 = i16x8::new(16, 15, 14, 13, 12, 11, 10, 9);
let e: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: i16x8 = transmute(vminq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmin_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x2 = i32x2::new(16, 15);
let e: i32x2 = i32x2::new(1, 2);
let r: i32x2 = transmute(vmin_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vminq_s32() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let b: i32x4 = i32x4::new(16, 15, 14, 13);
let e: i32x4 = i32x4::new(1, 2, 3, 4);
let r: i32x4 = transmute(vminq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmin_u8() {
let a: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u8x8 = u8x8::new(16, 15, 14, 13, 12, 11, 10, 9);
let e: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: u8x8 = transmute(vmin_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vminq_u8() {
let a: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: u8x16 = u8x16::new(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1);
let e: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1);
let r: u8x16 = transmute(vminq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmin_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: u16x4 = u16x4::new(16, 15, 14, 13);
let e: u16x4 = u16x4::new(1, 2, 3, 4);
let r: u16x4 = transmute(vmin_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vminq_u16() {
let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u16x8 = u16x8::new(16, 15, 14, 13, 12, 11, 10, 9);
let e: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: u16x8 = transmute(vminq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmin_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32x2 = u32x2::new(16, 15);
let e: u32x2 = u32x2::new(1, 2);
let r: u32x2 = transmute(vmin_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vminq_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: u32x4 = u32x4::new(16, 15, 14, 13);
let e: u32x4 = u32x4::new(1, 2, 3, 4);
let r: u32x4 = transmute(vminq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vmin_f32() {
let a: f32x2 = f32x2::new(1.0, -2.0);
let b: f32x2 = f32x2::new(0.0, 3.0);
let e: f32x2 = f32x2::new(0.0, -2.0);
let r: f32x2 = transmute(vmin_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vminq_f32() {
let a: f32x4 = f32x4::new(1.0, -2.0, 3.0, -4.0);
let b: f32x4 = f32x4::new(0.0, 3.0, 2.0, 8.0);
let e: f32x4 = f32x4::new(0.0, -2.0, 2.0, -4.0);
let r: f32x4 = transmute(vminq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
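// vminnm: the IEEE 754-2008 minNum counterpart of vmaxnm; NaN inputs are
// likewise not covered here.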
#[simd_test(enable = "neon")]
unsafe fn test_vminnm_f32() {
let a: f32x2 = f32x2::new(1.0, 2.0);
let b: f32x2 = f32x2::new(8.0, 16.0);
let e: f32x2 = f32x2::new(1.0, 2.0);
let r: f32x2 = transmute(vminnm_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vminnmq_f32() {
let a: f32x4 = f32x4::new(1.0, 2.0, 3.0, -4.0);
let b: f32x4 = f32x4::new(8.0, 16.0, -1.0, 6.0);
let e: f32x4 = f32x4::new(1.0, 2.0, -1.0, -4.0);
let r: f32x4 = transmute(vminnmq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
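// vpadd: pairwise addition of adjacent lanes; r = [a0 + a1, b0 + b1].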
#[simd_test(enable = "neon")]
unsafe fn test_vpadd_f32() {
let a: f32x2 = f32x2::new(1., 2.);
let b: f32x2 = f32x2::new(3., 4.);
let e: f32x2 = f32x2::new(3., 7.);
let r: f32x2 = transmute(vpadd_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
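// vqdmull: saturating doubling widening multiply, 2 * a * b per lane. For
// 16-bit inputs only i16::MIN * i16::MIN can actually saturate, and these
// tests stay well clear of that case.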
#[simd_test(enable = "neon")]
unsafe fn test_vqdmull_s16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let b: i16x4 = i16x4::new(1, 2, 3, 4);
let e: i32x4 = i32x4::new(0, 4, 12, 24);
let r: i32x4 = transmute(vqdmull_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmull_s32() {
let a: i32x2 = i32x2::new(0, 1);
let b: i32x2 = i32x2::new(1, 2);
let e: i64x2 = i64x2::new(0, 4);
let r: i64x2 = transmute(vqdmull_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmull_n_s16() {
let a: i16x4 = i16x4::new(2, 4, 6, 8);
let b: i16 = 2;
let e: i32x4 = i32x4::new(8, 16, 24, 32);
let r: i32x4 = transmute(vqdmull_n_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmull_n_s32() {
let a: i32x2 = i32x2::new(2, 4);
let b: i32 = 2;
let e: i64x2 = i64x2::new(8, 16);
let r: i64x2 = transmute(vqdmull_n_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmull_lane_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(0, 2, 2, 0);
let e: i32x4 = i32x4::new(4, 8, 12, 16);
let r: i32x4 = transmute(vqdmull_lane_s16::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmull_lane_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x2 = i32x2::new(0, 2);
let e: i64x2 = i64x2::new(4, 8);
let r: i64x2 = transmute(vqdmull_lane_s32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
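// vqdmlal: doubling multiply-accumulate long, a + saturating(2 * b * c)
// per lane.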
#[simd_test(enable = "neon")]
unsafe fn test_vqdmlal_s16() {
let a: i32x4 = i32x4::new(1, 1, 1, 1);
let b: i16x4 = i16x4::new(1, 2, 3, 4);
let c: i16x4 = i16x4::new(2, 2, 2, 2);
let e: i32x4 = i32x4::new(5, 9, 13, 17);
let r: i32x4 = transmute(vqdmlal_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmlal_s32() {
let a: i64x2 = i64x2::new(1, 1);
let b: i32x2 = i32x2::new(1, 2);
let c: i32x2 = i32x2::new(2, 2);
let e: i64x2 = i64x2::new(5, 9);
let r: i64x2 = transmute(vqdmlal_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmlal_n_s16() {
let a: i32x4 = i32x4::new(1, 1, 1, 1);
let b: i16x4 = i16x4::new(1, 2, 3, 4);
let c: i16 = 2;
let e: i32x4 = i32x4::new(5, 9, 13, 17);
let r: i32x4 = transmute(vqdmlal_n_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmlal_n_s32() {
let a: i64x2 = i64x2::new(1, 1);
let b: i32x2 = i32x2::new(1, 2);
let c: i32 = 2;
let e: i64x2 = i64x2::new(5, 9);
let r: i64x2 = transmute(vqdmlal_n_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmlal_lane_s16() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(1, 2, 3, 4);
let c: i16x4 = i16x4::new(0, 2, 2, 0);
let e: i32x4 = i32x4::new(5, 10, 15, 20);
let r: i32x4 = transmute(vqdmlal_lane_s16::<2>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmlal_lane_s32() {
let a: i64x2 = i64x2::new(1, 2);
let b: i32x2 = i32x2::new(1, 2);
let c: i32x2 = i32x2::new(0, 2);
let e: i64x2 = i64x2::new(5, 10);
let r: i64x2 = transmute(vqdmlal_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
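// vqdmlsl family: saturating doubling multiply-subtract long:
// r[i] = a[i] - 2 * b[i] * c[i], e.g. 3 - 2 * 1 * 2 = -1.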
#[simd_test(enable = "neon")]
unsafe fn test_vqdmlsl_s16() {
let a: i32x4 = i32x4::new(3, 7, 11, 15);
let b: i16x4 = i16x4::new(1, 2, 3, 4);
let c: i16x4 = i16x4::new(2, 2, 2, 2);
let e: i32x4 = i32x4::new(-1, -1, -1, -1);
let r: i32x4 = transmute(vqdmlsl_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmlsl_s32() {
let a: i64x2 = i64x2::new(3, 7);
let b: i32x2 = i32x2::new(1, 2);
let c: i32x2 = i32x2::new(2, 2);
let e: i64x2 = i64x2::new(-1, -1);
let r: i64x2 = transmute(vqdmlsl_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmlsl_n_s16() {
let a: i32x4 = i32x4::new(3, 7, 11, 15);
let b: i16x4 = i16x4::new(1, 2, 3, 4);
let c: i16 = 2;
let e: i32x4 = i32x4::new(-1, -1, -1, -1);
let r: i32x4 = transmute(vqdmlsl_n_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmlsl_n_s32() {
let a: i64x2 = i64x2::new(3, 7);
let b: i32x2 = i32x2::new(1, 2);
let c: i32 = 2;
let e: i64x2 = i64x2::new(-1, -1);
let r: i64x2 = transmute(vqdmlsl_n_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmlsl_lane_s16() {
let a: i32x4 = i32x4::new(3, 6, 9, 12);
let b: i16x4 = i16x4::new(1, 2, 3, 4);
let c: i16x4 = i16x4::new(0, 2, 2, 0);
let e: i32x4 = i32x4::new(-1, -2, -3, -4);
let r: i32x4 = transmute(vqdmlsl_lane_s16::<2>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmlsl_lane_s32() {
let a: i64x2 = i64x2::new(3, 6);
let b: i32x2 = i32x2::new(1, 2);
let c: i32x2 = i32x2::new(0, 2);
let e: i64x2 = i64x2::new(-1, -2);
let r: i64x2 = transmute(vqdmlsl_lane_s32::<1>(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
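// vqdmulh family: saturating doubling multiply returning the high half:
// for 16-bit lanes, r[i] = (2 * a[i] * b[i]) >> 16, saturated. With
// a = 0x7F_FF and b = 2: (2 * 0x7F_FF * 2) >> 16 = 0x1_FFFC >> 16 = 1.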
#[simd_test(enable = "neon")]
unsafe fn test_vqdmulh_s16() {
let a: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let e: i16x4 = i16x4::new(1, 1, 1, 1);
let r: i16x4 = transmute(vqdmulh_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmulhq_s16() {
let a: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let r: i16x8 = transmute(vqdmulhq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmulh_s32() {
let a: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let b: i32x2 = i32x2::new(2, 2);
let e: i32x2 = i32x2::new(1, 1);
let r: i32x2 = transmute(vqdmulh_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmulhq_s32() {
let a: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let e: i32x4 = i32x4::new(1, 1, 1, 1);
let r: i32x4 = transmute(vqdmulhq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmulh_n_s16() {
let a: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
let b: i16 = 2;
let e: i16x4 = i16x4::new(1, 1, 1, 1);
let r: i16x4 = transmute(vqdmulh_n_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmulh_n_s32() {
let a: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let b: i32 = 2;
let e: i32x2 = i32x2::new(1, 1);
let r: i32x2 = transmute(vqdmulh_n_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmulhq_n_s16() {
let a: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
let b: i16 = 2;
let e: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let r: i16x8 = transmute(vqdmulhq_n_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmulhq_n_s32() {
let a: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let b: i32 = 2;
let e: i32x4 = i32x4::new(1, 1, 1, 1);
let r: i32x4 = transmute(vqdmulhq_n_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmulhq_laneq_s16() {
let a: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
let b: i16x8 = i16x8::new(2, 1, 1, 1, 1, 1, 1, 1);
let e: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let r: i16x8 = transmute(vqdmulhq_laneq_s16::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmulh_laneq_s16() {
let a: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
let b: i16x8 = i16x8::new(2, 1, 1, 1, 1, 1, 1, 1);
let e: i16x4 = i16x4::new(1, 1, 1, 1);
let r: i16x4 = transmute(vqdmulh_laneq_s16::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmulhq_laneq_s32() {
let a: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let b: i32x4 = i32x4::new(2, 1, 1, 1);
let e: i32x4 = i32x4::new(1, 1, 1, 1);
let r: i32x4 = transmute(vqdmulhq_laneq_s32::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqdmulh_laneq_s32() {
let a: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let b: i32x4 = i32x4::new(2, 1, 1, 1);
let e: i32x2 = i32x2::new(1, 1);
let r: i32x2 = transmute(vqdmulh_laneq_s32::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
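// vqmovn: saturating narrow. Values outside the narrower type's range
// saturate, so 0x7F_FF (i16::MAX) narrows to 0x7F (i8::MAX).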
#[simd_test(enable = "neon")]
unsafe fn test_vqmovn_s16() {
let a: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
let e: i8x8 = i8x8::new(0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F);
let r: i8x8 = transmute(vqmovn_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqmovn_s32() {
let a: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let e: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
let r: i16x4 = transmute(vqmovn_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqmovn_s64() {
let a: i64x2 = i64x2::new(0x7F_FF_FF_FF_FF_FF_FF_FF, 0x7F_FF_FF_FF_FF_FF_FF_FF);
let e: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let r: i32x2 = transmute(vqmovn_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqmovn_u16() {
let a: u16x8 = u16x8::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let e: u8x8 = u8x8::new(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
let r: u8x8 = transmute(vqmovn_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqmovn_u32() {
let a: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let e: u16x4 = u16x4::new(0xFF_FF, 0xFF_FF, 0xFF_FF, 0xFF_FF);
let r: u16x4 = transmute(vqmovn_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqmovn_u64() {
let a: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0xFF_FF_FF_FF_FF_FF_FF_FF);
let e: u32x2 = u32x2::new(0xFF_FF_FF_FF, 0xFF_FF_FF_FF);
let r: u32x2 = transmute(vqmovn_u64(transmute(a)));
assert_eq!(r, e);
}
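// vqmovun: saturating narrow from signed input to unsigned result; negative
// inputs saturate to 0.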
#[simd_test(enable = "neon")]
unsafe fn test_vqmovun_s16() {
let a: i16x8 = i16x8::new(-1, -1, -1, -1, -1, -1, -1, -1);
let e: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: u8x8 = transmute(vqmovun_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqmovun_s32() {
let a: i32x4 = i32x4::new(-1, -1, -1, -1);
let e: u16x4 = u16x4::new(0, 0, 0, 0);
let r: u16x4 = transmute(vqmovun_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqmovun_s64() {
let a: i64x2 = i64x2::new(-1, -1);
let e: u32x2 = u32x2::new(0, 0);
let r: u32x2 = transmute(vqmovun_s64(transmute(a)));
assert_eq!(r, e);
}
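// vqrdmulh family: like vqdmulh but rounding: a bias of 1 << (bits - 1) is
// added before the final shift. For 16-bit lanes with a = 0x7F_FF, b = 2:
// (2 * 0x7F_FF * 2 + 0x80_00) >> 16 = 0x2_7FFC >> 16 = 2 (vs 1 unrounded).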
#[simd_test(enable = "neon")]
unsafe fn test_vqrdmulh_s16() {
let a: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let e: i16x4 = i16x4::new(2, 2, 2, 2);
let r: i16x4 = transmute(vqrdmulh_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrdmulhq_s16() {
let a: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let r: i16x8 = transmute(vqrdmulhq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrdmulh_s32() {
let a: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let b: i32x2 = i32x2::new(2, 2);
let e: i32x2 = i32x2::new(2, 2);
let r: i32x2 = transmute(vqrdmulh_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrdmulhq_s32() {
let a: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let e: i32x4 = i32x4::new(2, 2, 2, 2);
let r: i32x4 = transmute(vqrdmulhq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrdmulh_n_s16() {
let a: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
let b: i16 = 2;
let e: i16x4 = i16x4::new(2, 2, 2, 2);
let r: i16x4 = transmute(vqrdmulh_n_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrdmulhq_n_s16() {
let a: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
let b: i16 = 2;
let e: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let r: i16x8 = transmute(vqrdmulhq_n_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrdmulh_n_s32() {
let a: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let b: i32 = 2;
let e: i32x2 = i32x2::new(2, 2);
let r: i32x2 = transmute(vqrdmulh_n_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrdmulhq_n_s32() {
let a: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let b: i32 = 2;
let e: i32x4 = i32x4::new(2, 2, 2, 2);
let r: i32x4 = transmute(vqrdmulhq_n_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrdmulh_lane_s16() {
let a: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
let b: i16x4 = i16x4::new(0, 2, 0, 0);
let e: i16x4 = i16x4::new(2, 2, 2, 2);
let r: i16x4 = transmute(vqrdmulh_lane_s16::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrdmulh_laneq_s16() {
let a: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
let b: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
let e: i16x4 = i16x4::new(2, 2, 2, 2);
let r: i16x4 = transmute(vqrdmulh_laneq_s16::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrdmulhq_lane_s16() {
let a: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
let b: i16x4 = i16x4::new(0, 2, 0, 0);
let e: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let r: i16x8 = transmute(vqrdmulhq_lane_s16::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrdmulhq_laneq_s16() {
let a: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
let b: i16x8 = i16x8::new(0, 2, 0, 0, 0, 0, 0, 0);
let e: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let r: i16x8 = transmute(vqrdmulhq_laneq_s16::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrdmulh_lane_s32() {
let a: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let b: i32x2 = i32x2::new(0, 2);
let e: i32x2 = i32x2::new(2, 2);
let r: i32x2 = transmute(vqrdmulh_lane_s32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrdmulh_laneq_s32() {
let a: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let b: i32x4 = i32x4::new(0, 2, 0, 0);
let e: i32x2 = i32x2::new(2, 2);
let r: i32x2 = transmute(vqrdmulh_laneq_s32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrdmulhq_lane_s32() {
let a: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let b: i32x2 = i32x2::new(0, 2);
let e: i32x4 = i32x4::new(2, 2, 2, 2);
let r: i32x4 = transmute(vqrdmulhq_lane_s32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrdmulhq_laneq_s32() {
let a: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let b: i32x4 = i32x4::new(0, 2, 0, 0);
let e: i32x4 = i32x4::new(2, 2, 2, 2);
let r: i32x4 = transmute(vqrdmulhq_laneq_s32::<1>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
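// vqrshl: saturating rounding shift left by a per-lane signed shift count.
// In-range values shift normally (2 << 2 = 8), while lanes already at the
// type's minimum or maximum stay pinned at the saturation bound rather than
// wrapping.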
#[simd_test(enable = "neon")]
unsafe fn test_vqrshl_s8() {
let a: i8x8 = i8x8::new(2, -128, 0x7F, 3, 4, 5, 6, 7);
let b: i8x8 = i8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: i8x8 = i8x8::new(8, -128, 0x7F, 12, 16, 20, 24, 28);
let r: i8x8 = transmute(vqrshl_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshlq_s8() {
let a: i8x16 = i8x16::new(2, -128, 0x7F, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let e: i8x16 = i8x16::new(8, -128, 0x7F, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60);
let r: i8x16 = transmute(vqrshlq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshl_s16() {
let a: i16x4 = i16x4::new(2, -32768, 0x7F_FF, 3);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let e: i16x4 = i16x4::new(8, -32768, 0x7F_FF, 12);
let r: i16x4 = transmute(vqrshl_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshlq_s16() {
let a: i16x8 = i16x8::new(2, -32768, 0x7F_FF, 3, 4, 5, 6, 7);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: i16x8 = i16x8::new(8, -32768, 0x7F_FF, 12, 16, 20, 24, 28);
let r: i16x8 = transmute(vqrshlq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshl_s32() {
let a: i32x2 = i32x2::new(2, -2147483648);
let b: i32x2 = i32x2::new(2, 2);
let e: i32x2 = i32x2::new(8, -2147483648);
let r: i32x2 = transmute(vqrshl_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshlq_s32() {
let a: i32x4 = i32x4::new(2, -2147483648, 0x7F_FF_FF_FF, 3);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let e: i32x4 = i32x4::new(8, -2147483648, 0x7F_FF_FF_FF, 12);
let r: i32x4 = transmute(vqrshlq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshl_s64() {
let a: i64x1 = i64x1::new(2);
let b: i64x1 = i64x1::new(2);
let e: i64x1 = i64x1::new(8);
let r: i64x1 = transmute(vqrshl_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshlq_s64() {
let a: i64x2 = i64x2::new(2, -9223372036854775808);
let b: i64x2 = i64x2::new(2, 2);
let e: i64x2 = i64x2::new(8, -9223372036854775808);
let r: i64x2 = transmute(vqrshlq_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshl_u8() {
let a: u8x8 = u8x8::new(2, 0, 0xFF, 3, 4, 5, 6, 7);
let b: i8x8 = i8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: u8x8 = u8x8::new(8, 0, 0xFF, 12, 16, 20, 24, 28);
let r: u8x8 = transmute(vqrshl_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshlq_u8() {
let a: u8x16 = u8x16::new(2, 0, 0xFF, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let e: u8x16 = u8x16::new(8, 0, 0xFF, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60);
let r: u8x16 = transmute(vqrshlq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshl_u16() {
let a: u16x4 = u16x4::new(2, 0, 0xFF_FF, 3);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let e: u16x4 = u16x4::new(8, 0, 0xFF_FF, 12);
let r: u16x4 = transmute(vqrshl_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshlq_u16() {
let a: u16x8 = u16x8::new(2, 0, 0xFF_FF, 3, 4, 5, 6, 7);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: u16x8 = u16x8::new(8, 0, 0xFF_FF, 12, 16, 20, 24, 28);
let r: u16x8 = transmute(vqrshlq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshl_u32() {
let a: u32x2 = u32x2::new(2, 0);
let b: i32x2 = i32x2::new(2, 2);
let e: u32x2 = u32x2::new(8, 0);
let r: u32x2 = transmute(vqrshl_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshlq_u32() {
let a: u32x4 = u32x4::new(2, 0, 0xFF_FF_FF_FF, 3);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let e: u32x4 = u32x4::new(8, 0, 0xFF_FF_FF_FF, 12);
let r: u32x4 = transmute(vqrshlq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshl_u64() {
let a: u64x1 = u64x1::new(2);
let b: i64x1 = i64x1::new(2);
let e: u64x1 = u64x1::new(8);
let r: u64x1 = transmute(vqrshl_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshlq_u64() {
let a: u64x2 = u64x2::new(2, 0);
let b: i64x2 = i64x2::new(2, 2);
let e: u64x2 = u64x2::new(8, 0);
let r: u64x2 = transmute(vqrshlq_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
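// vqrshrn_n: saturating rounding shift right narrow by an immediate. A
// rounding bias of 1 << (N - 1) is added before shifting, so (4 + 2) >> 2 = 1,
// and i16::MIN >> 2 = -8192 saturates to i8::MIN (-128) when narrowing.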
#[simd_test(enable = "neon")]
unsafe fn test_vqrshrn_n_s16() {
let a: i16x8 = i16x8::new(-32768, 4, 8, 12, 16, 20, 24, 28);
let e: i8x8 = i8x8::new(-128, 1, 2, 3, 4, 5, 6, 7);
let r: i8x8 = transmute(vqrshrn_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshrn_n_s32() {
let a: i32x4 = i32x4::new(-2147483648, 4, 8, 12);
let e: i16x4 = i16x4::new(-32768, 1, 2, 3);
let r: i16x4 = transmute(vqrshrn_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshrn_n_s64() {
let a: i64x2 = i64x2::new(-9223372036854775808, 4);
let e: i32x2 = i32x2::new(-2147483648, 1);
let r: i32x2 = transmute(vqrshrn_n_s64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshrn_n_u16() {
let a: u16x8 = u16x8::new(0, 4, 8, 12, 16, 20, 24, 28);
let e: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u8x8 = transmute(vqrshrn_n_u16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshrn_n_u32() {
let a: u32x4 = u32x4::new(0, 4, 8, 12);
let e: u16x4 = u16x4::new(0, 1, 2, 3);
let r: u16x4 = transmute(vqrshrn_n_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshrn_n_u64() {
let a: u64x2 = u64x2::new(0, 4);
let e: u32x2 = u32x2::new(0, 1);
let r: u32x2 = transmute(vqrshrn_n_u64::<2>(transmute(a)));
assert_eq!(r, e);
}
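// vqrshrun_n: as vqrshrn_n, but narrowing signed inputs to an unsigned result.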
#[simd_test(enable = "neon")]
unsafe fn test_vqrshrun_n_s16() {
let a: i16x8 = i16x8::new(0, 4, 8, 12, 16, 20, 24, 28);
let e: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u8x8 = transmute(vqrshrun_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshrun_n_s32() {
let a: i32x4 = i32x4::new(0, 4, 8, 12);
let e: u16x4 = u16x4::new(0, 1, 2, 3);
let r: u16x4 = transmute(vqrshrun_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqrshrun_n_s64() {
let a: i64x2 = i64x2::new(0, 4);
let e: u32x2 = u32x2::new(0, 1);
let r: u32x2 = transmute(vqrshrun_n_s64::<2>(transmute(a)));
assert_eq!(r, e);
}
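// vqshl: saturating shift left by a per-lane signed shift count, without
// rounding; these inputs are small enough that no lane saturates.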
#[simd_test(enable = "neon")]
unsafe fn test_vqshl_s8() {
let a: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: i8x8 = i8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: i8x8 = i8x8::new(0, 4, 8, 12, 16, 20, 24, 28);
let r: i8x8 = transmute(vqshl_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlq_s8() {
let a: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let e: i8x16 = i8x16::new(0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60);
let r: i8x16 = transmute(vqshlq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshl_s16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let e: i16x4 = i16x4::new(0, 4, 8, 12);
let r: i16x4 = transmute(vqshl_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlq_s16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: i16x8 = i16x8::new(0, 4, 8, 12, 16, 20, 24, 28);
let r: i16x8 = transmute(vqshlq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshl_s32() {
let a: i32x2 = i32x2::new(0, 1);
let b: i32x2 = i32x2::new(2, 2);
let e: i32x2 = i32x2::new(0, 4);
let r: i32x2 = transmute(vqshl_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlq_s32() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let e: i32x4 = i32x4::new(0, 4, 8, 12);
let r: i32x4 = transmute(vqshlq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshl_s64() {
let a: i64x1 = i64x1::new(0);
let b: i64x1 = i64x1::new(2);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vqshl_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlq_s64() {
let a: i64x2 = i64x2::new(0, 1);
let b: i64x2 = i64x2::new(2, 2);
let e: i64x2 = i64x2::new(0, 4);
let r: i64x2 = transmute(vqshlq_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshl_u8() {
let a: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: i8x8 = i8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: u8x8 = u8x8::new(0, 4, 8, 12, 16, 20, 24, 28);
let r: u8x8 = transmute(vqshl_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlq_u8() {
let a: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let e: u8x16 = u8x16::new(0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60);
let r: u8x16 = transmute(vqshlq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshl_u16() {
let a: u16x4 = u16x4::new(0, 1, 2, 3);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let e: u16x4 = u16x4::new(0, 4, 8, 12);
let r: u16x4 = transmute(vqshl_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlq_u16() {
let a: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: u16x8 = u16x8::new(0, 4, 8, 12, 16, 20, 24, 28);
let r: u16x8 = transmute(vqshlq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshl_u32() {
let a: u32x2 = u32x2::new(0, 1);
let b: i32x2 = i32x2::new(2, 2);
let e: u32x2 = u32x2::new(0, 4);
let r: u32x2 = transmute(vqshl_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlq_u32() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let e: u32x4 = u32x4::new(0, 4, 8, 12);
let r: u32x4 = transmute(vqshlq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshl_u64() {
let a: u64x1 = u64x1::new(0);
let b: i64x1 = i64x1::new(2);
let e: u64x1 = u64x1::new(0);
let r: u64x1 = transmute(vqshl_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlq_u64() {
let a: u64x2 = u64x2::new(0, 1);
let b: i64x2 = i64x2::new(2, 2);
let e: u64x2 = u64x2::new(0, 4);
let r: u64x2 = transmute(vqshlq_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
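// vqshl_n: saturating shift left by an immediate passed as a const generic,
// e.g. vqshl_n_s8::<2> computes a[i] << 2 with saturation.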
#[simd_test(enable = "neon")]
unsafe fn test_vqshl_n_s8() {
let a: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i8x8 = i8x8::new(0, 4, 8, 12, 16, 20, 24, 28);
let r: i8x8 = transmute(vqshl_n_s8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlq_n_s8() {
let a: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let e: i8x16 = i8x16::new(0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60);
let r: i8x16 = transmute(vqshlq_n_s8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshl_n_s16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let e: i16x4 = i16x4::new(0, 4, 8, 12);
let r: i16x4 = transmute(vqshl_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlq_n_s16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i16x8 = i16x8::new(0, 4, 8, 12, 16, 20, 24, 28);
let r: i16x8 = transmute(vqshlq_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshl_n_s32() {
let a: i32x2 = i32x2::new(0, 1);
let e: i32x2 = i32x2::new(0, 4);
let r: i32x2 = transmute(vqshl_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlq_n_s32() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let e: i32x4 = i32x4::new(0, 4, 8, 12);
let r: i32x4 = transmute(vqshlq_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshl_n_s64() {
let a: i64x1 = i64x1::new(0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vqshl_n_s64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlq_n_s64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i64x2 = i64x2::new(0, 4);
let r: i64x2 = transmute(vqshlq_n_s64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshl_n_u8() {
let a: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u8x8 = u8x8::new(0, 4, 8, 12, 16, 20, 24, 28);
let r: u8x8 = transmute(vqshl_n_u8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlq_n_u8() {
let a: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let e: u8x16 = u8x16::new(0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60);
let r: u8x16 = transmute(vqshlq_n_u8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshl_n_u16() {
let a: u16x4 = u16x4::new(0, 1, 2, 3);
let e: u16x4 = u16x4::new(0, 4, 8, 12);
let r: u16x4 = transmute(vqshl_n_u16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlq_n_u16() {
let a: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u16x8 = u16x8::new(0, 4, 8, 12, 16, 20, 24, 28);
let r: u16x8 = transmute(vqshlq_n_u16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshl_n_u32() {
let a: u32x2 = u32x2::new(0, 1);
let e: u32x2 = u32x2::new(0, 4);
let r: u32x2 = transmute(vqshl_n_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlq_n_u32() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let e: u32x4 = u32x4::new(0, 4, 8, 12);
let r: u32x4 = transmute(vqshlq_n_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshl_n_u64() {
let a: u64x1 = u64x1::new(0);
let e: u64x1 = u64x1::new(0);
let r: u64x1 = transmute(vqshl_n_u64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlq_n_u64() {
let a: u64x2 = u64x2::new(0, 1);
let e: u64x2 = u64x2::new(0, 4);
let r: u64x2 = transmute(vqshlq_n_u64::<2>(transmute(a)));
assert_eq!(r, e);
}
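// vqshlu_n: saturating shift left by an immediate, taking signed input and
// producing an unsigned result; a negative input lane would saturate to 0.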
#[simd_test(enable = "neon")]
unsafe fn test_vqshlu_n_s8() {
let a: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u8x8 = u8x8::new(0, 4, 8, 12, 16, 20, 24, 28);
let r: u8x8 = transmute(vqshlu_n_s8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlu_n_s16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let e: u16x4 = u16x4::new(0, 4, 8, 12);
let r: u16x4 = transmute(vqshlu_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlu_n_s32() {
let a: i32x2 = i32x2::new(0, 1);
let e: u32x2 = u32x2::new(0, 4);
let r: u32x2 = transmute(vqshlu_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshlu_n_s64() {
let a: i64x1 = i64x1::new(0);
let e: u64x1 = u64x1::new(0);
let r: u64x1 = transmute(vqshlu_n_s64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshluq_n_s8() {
let a: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let e: u8x16 = u8x16::new(0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60);
let r: u8x16 = transmute(vqshluq_n_s8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshluq_n_s16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u16x8 = u16x8::new(0, 4, 8, 12, 16, 20, 24, 28);
let r: u16x8 = transmute(vqshluq_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshluq_n_s32() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let e: u32x4 = u32x4::new(0, 4, 8, 12);
let r: u32x4 = transmute(vqshluq_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshluq_n_s64() {
let a: i64x2 = i64x2::new(0, 1);
let e: u64x2 = u64x2::new(0, 4);
let r: u64x2 = transmute(vqshluq_n_s64::<2>(transmute(a)));
assert_eq!(r, e);
}
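// vqshrn_n: saturating shift right narrow by an immediate, without rounding:
// 12 >> 2 = 3, narrowed to the next-smaller element type.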
#[simd_test(enable = "neon")]
unsafe fn test_vqshrn_n_s16() {
let a: i16x8 = i16x8::new(0, 4, 8, 12, 16, 20, 24, 28);
let e: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i8x8 = transmute(vqshrn_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshrn_n_s32() {
let a: i32x4 = i32x4::new(0, 4, 8, 12);
let e: i16x4 = i16x4::new(0, 1, 2, 3);
let r: i16x4 = transmute(vqshrn_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshrn_n_s64() {
let a: i64x2 = i64x2::new(0, 4);
let e: i32x2 = i32x2::new(0, 1);
let r: i32x2 = transmute(vqshrn_n_s64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshrn_n_u16() {
let a: u16x8 = u16x8::new(0, 4, 8, 12, 16, 20, 24, 28);
let e: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u8x8 = transmute(vqshrn_n_u16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshrn_n_u32() {
let a: u32x4 = u32x4::new(0, 4, 8, 12);
let e: u16x4 = u16x4::new(0, 1, 2, 3);
let r: u16x4 = transmute(vqshrn_n_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshrn_n_u64() {
let a: u64x2 = u64x2::new(0, 4);
let e: u32x2 = u32x2::new(0, 1);
let r: u32x2 = transmute(vqshrn_n_u64::<2>(transmute(a)));
assert_eq!(r, e);
}
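// vqshrun_n: as vqshrn_n, but narrowing signed inputs to an unsigned result.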
#[simd_test(enable = "neon")]
unsafe fn test_vqshrun_n_s16() {
let a: i16x8 = i16x8::new(0, 4, 8, 12, 16, 20, 24, 28);
let e: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u8x8 = transmute(vqshrun_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshrun_n_s32() {
let a: i32x4 = i32x4::new(0, 4, 8, 12);
let e: u16x4 = u16x4::new(0, 1, 2, 3);
let r: u16x4 = transmute(vqshrun_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqshrun_n_s64() {
let a: i64x2 = i64x2::new(0, 4);
let e: u32x2 = u32x2::new(0, 1);
let r: u32x2 = transmute(vqshrun_n_s64::<2>(transmute(a)));
assert_eq!(r, e);
}
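// vrsqrte: reciprocal square-root estimate. The result is a hardware
// approximation of 1/sqrt(a) (roughly 8 fractional bits of precision per the
// Arm reference), hence 0.705078125 rather than 1/sqrt(2) = 0.7071... for
// a = 2.0. The unsigned variants saturate these small inputs to u32::MAX.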
#[simd_test(enable = "neon")]
unsafe fn test_vrsqrte_f32() {
let a: f32x2 = f32x2::new(1.0, 2.0);
let e: f32x2 = f32x2::new(0.998046875, 0.705078125);
let r: f32x2 = transmute(vrsqrte_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsqrteq_f32() {
let a: f32x4 = f32x4::new(1.0, 2.0, 3.0, 4.0);
let e: f32x4 = f32x4::new(0.998046875, 0.705078125, 0.576171875, 0.4990234375);
let r: f32x4 = transmute(vrsqrteq_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsqrte_u32() {
let a: u32x2 = u32x2::new(1, 2);
let e: u32x2 = u32x2::new(4294967295, 4294967295);
let r: u32x2 = transmute(vrsqrte_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsqrteq_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let e: u32x4 = u32x4::new(4294967295, 4294967295, 4294967295, 4294967295);
let r: u32x4 = transmute(vrsqrteq_u32(transmute(a)));
assert_eq!(r, e);
}
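// vrsqrts: reciprocal square-root step, r[i] = (3 - a[i] * b[i]) / 2, i.e.
// one Newton-Raphson refinement step: (3 - 2 * 2) / 2 = -0.5.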
#[simd_test(enable = "neon")]
unsafe fn test_vrsqrts_f32() {
let a: f32x2 = f32x2::new(1.0, 2.0);
let b: f32x2 = f32x2::new(1.0, 2.0);
let e: f32x2 = f32x2::new(1., -0.5);
let r: f32x2 = transmute(vrsqrts_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsqrtsq_f32() {
let a: f32x4 = f32x4::new(1.0, 2.0, 3.0, 4.0);
let b: f32x4 = f32x4::new(1.0, 2.0, 3.0, 4.0);
let e: f32x4 = f32x4::new(1., -0.5, -3.0, -6.5);
let r: f32x4 = transmute(vrsqrtsq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
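// vrecpe: reciprocal estimate, a hardware approximation of 1/a (roughly
// 8 fractional bits): 0.24951171875 rather than exactly 0.25 for a = 4.0.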
#[simd_test(enable = "neon")]
unsafe fn test_vrecpe_f32() {
let a: f32x2 = f32x2::new(4.0, 3.0);
let e: f32x2 = f32x2::new(0.24951171875, 0.3330078125);
let r: f32x2 = transmute(vrecpe_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrecpeq_f32() {
let a: f32x4 = f32x4::new(4.0, 3.0, 2.0, 1.0);
let e: f32x4 = f32x4::new(0.24951171875, 0.3330078125, 0.4990234375, 0.998046875);
let r: f32x4 = transmute(vrecpeq_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrecpe_u32() {
let a: u32x2 = u32x2::new(4, 3);
let e: u32x2 = u32x2::new(4294967295, 4294967295);
let r: u32x2 = transmute(vrecpe_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrecpeq_u32() {
let a: u32x4 = u32x4::new(4, 3, 2, 1);
let e: u32x4 = u32x4::new(4294967295, 4294967295, 4294967295, 4294967295);
let r: u32x4 = transmute(vrecpeq_u32(transmute(a)));
assert_eq!(r, e);
}
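// vrecps: reciprocal step, r[i] = 2 - a[i] * b[i], one Newton-Raphson
// refinement step: 2 - 4 * 4 = -14.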
#[simd_test(enable = "neon")]
unsafe fn test_vrecps_f32() {
let a: f32x2 = f32x2::new(4.0, 3.0);
let b: f32x2 = f32x2::new(4.0, 3.0);
let e: f32x2 = f32x2::new(-14., -7.);
let r: f32x2 = transmute(vrecps_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrecpsq_f32() {
let a: f32x4 = f32x4::new(4.0, 3.0, 2.0, 1.0);
let b: f32x4 = f32x4::new(4.0, 3.0, 2.0, 1.0);
let e: f32x4 = f32x4::new(-14., -7., -2., 1.);
let r: f32x4 = transmute(vrecpsq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
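// vreinterpret: bitwise reinterpretation between vector types of the same
// total width. No bits change, so lane values carry over unchanged whenever
// source and destination lanes are the same size.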
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s8_u8() {
let a: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i8x8 = transmute(vreinterpret_s8_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s8_p8() {
let a: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i8x8 = transmute(vreinterpret_s8_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s16_p16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let e: i16x4 = i16x4::new(0, 1, 2, 3);
let r: i16x4 = transmute(vreinterpret_s16_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s16_u16() {
let a: u16x4 = u16x4::new(0, 1, 2, 3);
let e: i16x4 = i16x4::new(0, 1, 2, 3);
let r: i16x4 = transmute(vreinterpret_s16_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s32_u32() {
let a: u32x2 = u32x2::new(0, 1);
let e: i32x2 = i32x2::new(0, 1);
let r: i32x2 = transmute(vreinterpret_s32_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s64_u64() {
let a: u64x1 = u64x1::new(0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_s64_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s8_u8() {
let a: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let e: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let r: i8x16 = transmute(vreinterpretq_s8_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s8_p8() {
let a: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let e: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let r: i8x16 = transmute(vreinterpretq_s8_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s16_p16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i16x8 = transmute(vreinterpretq_s16_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s16_u16() {
let a: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i16x8 = transmute(vreinterpretq_s16_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s32_u32() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let e: i32x4 = i32x4::new(0, 1, 2, 3);
let r: i32x4 = transmute(vreinterpretq_s32_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s64_u64() {
let a: u64x2 = u64x2::new(0, 1);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_s64_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u8_p8() {
let a: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u8x8 = transmute(vreinterpret_u8_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u8_s8() {
let a: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u8x8 = transmute(vreinterpret_u8_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u16_p16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let e: u16x4 = u16x4::new(0, 1, 2, 3);
let r: u16x4 = transmute(vreinterpret_u16_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u16_s16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let e: u16x4 = u16x4::new(0, 1, 2, 3);
let r: u16x4 = transmute(vreinterpret_u16_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u32_s32() {
let a: i32x2 = i32x2::new(0, 1);
let e: u32x2 = u32x2::new(0, 1);
let r: u32x2 = transmute(vreinterpret_u32_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u64_s64() {
let a: i64x1 = i64x1::new(0);
let e: u64x1 = u64x1::new(0);
let r: u64x1 = transmute(vreinterpret_u64_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u8_p8() {
let a: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let e: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let r: u8x16 = transmute(vreinterpretq_u8_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u8_s8() {
let a: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let e: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let r: u8x16 = transmute(vreinterpretq_u8_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u16_p16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u16x8 = transmute(vreinterpretq_u16_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u16_s16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u16x8 = transmute(vreinterpretq_u16_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u32_s32() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let e: u32x4 = u32x4::new(0, 1, 2, 3);
let r: u32x4 = transmute(vreinterpretq_u32_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u64_s64() {
let a: i64x2 = i64x2::new(0, 1);
let e: u64x2 = u64x2::new(0, 1);
let r: u64x2 = transmute(vreinterpretq_u64_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p8_s8() {
let a: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i8x8 = transmute(vreinterpret_p8_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p8_u8() {
let a: u8x8 = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i8x8 = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i8x8 = transmute(vreinterpret_p8_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p16_s16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let e: i16x4 = i16x4::new(0, 1, 2, 3);
let r: i16x4 = transmute(vreinterpret_p16_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p16_u16() {
let a: u16x4 = u16x4::new(0, 1, 2, 3);
let e: i16x4 = i16x4::new(0, 1, 2, 3);
let r: i16x4 = transmute(vreinterpret_p16_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p8_s8() {
let a: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let e: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let r: i8x16 = transmute(vreinterpretq_p8_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p8_u8() {
let a: u8x16 = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let e: i8x16 = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
let r: i8x16 = transmute(vreinterpretq_p8_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p16_s16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i16x8 = transmute(vreinterpretq_p16_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p16_u16() {
let a: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i16x8 = transmute(vreinterpretq_p16_u16(transmute(a)));
assert_eq!(r, e);
}
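// The remaining vreinterpret tests cross element widths, so lanes are
// re-split byte-wise. The expected values assume little-endian lane layout:
// the i16 value 1 becomes the byte pair (1, 0), so i16x4(0, 1, 2, 3)
// reinterprets as i8x8(0, 0, 1, 0, 2, 0, 3, 0), and the inverse direction
// reassembles the wider lanes from those bytes.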
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s8_s16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let e: i8x8 = i8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let r: i8x8 = transmute(vreinterpret_s8_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s8_u16() {
let a: u16x4 = u16x4::new(0, 1, 2, 3);
let e: i8x8 = i8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let r: i8x8 = transmute(vreinterpret_s8_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s8_p16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let e: i8x8 = i8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let r: i8x8 = transmute(vreinterpret_s8_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s16_s32() {
let a: i32x2 = i32x2::new(0, 1);
let e: i16x4 = i16x4::new(0, 0, 1, 0);
let r: i16x4 = transmute(vreinterpret_s16_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s16_u32() {
let a: u32x2 = u32x2::new(0, 1);
let e: i16x4 = i16x4::new(0, 0, 1, 0);
let r: i16x4 = transmute(vreinterpret_s16_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s32_s64() {
let a: i64x1 = i64x1::new(0);
let e: i32x2 = i32x2::new(0, 0);
let r: i32x2 = transmute(vreinterpret_s32_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s32_u64() {
let a: u64x1 = u64x1::new(0);
let e: i32x2 = i32x2::new(0, 0);
let r: i32x2 = transmute(vreinterpret_s32_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s8_s16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i8x16 = i8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let r: i8x16 = transmute(vreinterpretq_s8_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s8_u16() {
let a: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i8x16 = i8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let r: i8x16 = transmute(vreinterpretq_s8_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s8_p16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i8x16 = i8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let r: i8x16 = transmute(vreinterpretq_s8_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s16_s32() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let e: i16x8 = i16x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let r: i16x8 = transmute(vreinterpretq_s16_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s16_u32() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let e: i16x8 = i16x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let r: i16x8 = transmute(vreinterpretq_s16_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s32_s64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i32x4 = i32x4::new(0, 0, 1, 0);
let r: i32x4 = transmute(vreinterpretq_s32_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s32_u64() {
let a: u64x2 = u64x2::new(0, 1);
let e: i32x4 = i32x4::new(0, 0, 1, 0);
let r: i32x4 = transmute(vreinterpretq_s32_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u8_p16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let e: u8x8 = u8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let r: u8x8 = transmute(vreinterpret_u8_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u8_s16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let e: u8x8 = u8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let r: u8x8 = transmute(vreinterpret_u8_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u8_u16() {
let a: u16x4 = u16x4::new(0, 1, 2, 3);
let e: u8x8 = u8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let r: u8x8 = transmute(vreinterpret_u8_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u16_s32() {
let a: i32x2 = i32x2::new(0, 1);
let e: u16x4 = u16x4::new(0, 0, 1, 0);
let r: u16x4 = transmute(vreinterpret_u16_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u16_u32() {
let a: u32x2 = u32x2::new(0, 1);
let e: u16x4 = u16x4::new(0, 0, 1, 0);
let r: u16x4 = transmute(vreinterpret_u16_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u32_s64() {
let a: i64x1 = i64x1::new(0);
let e: u32x2 = u32x2::new(0, 0);
let r: u32x2 = transmute(vreinterpret_u32_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u32_u64() {
let a: u64x1 = u64x1::new(0);
let e: u32x2 = u32x2::new(0, 0);
let r: u32x2 = transmute(vreinterpret_u32_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u8_p16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u8x16 = u8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let r: u8x16 = transmute(vreinterpretq_u8_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u8_s16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u8x16 = u8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let r: u8x16 = transmute(vreinterpretq_u8_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u8_u16() {
let a: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: u8x16 = u8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let r: u8x16 = transmute(vreinterpretq_u8_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u16_s32() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let e: u16x8 = u16x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let r: u16x8 = transmute(vreinterpretq_u16_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u16_u32() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let e: u16x8 = u16x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let r: u16x8 = transmute(vreinterpretq_u16_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u32_s64() {
let a: i64x2 = i64x2::new(0, 1);
let e: u32x4 = u32x4::new(0, 0, 1, 0);
let r: u32x4 = transmute(vreinterpretq_u32_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u32_u64() {
let a: u64x2 = u64x2::new(0, 1);
let e: u32x4 = u32x4::new(0, 0, 1, 0);
let r: u32x4 = transmute(vreinterpretq_u32_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p8_p16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let e: i8x8 = i8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let r: i8x8 = transmute(vreinterpret_p8_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p8_s16() {
let a: i16x4 = i16x4::new(0, 1, 2, 3);
let e: i8x8 = i8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let r: i8x8 = transmute(vreinterpret_p8_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p8_u16() {
let a: u16x4 = u16x4::new(0, 1, 2, 3);
let e: i8x8 = i8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let r: i8x8 = transmute(vreinterpret_p8_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p16_s32() {
let a: i32x2 = i32x2::new(0, 1);
let e: i16x4 = i16x4::new(0, 0, 1, 0);
let r: i16x4 = transmute(vreinterpret_p16_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p16_u32() {
let a: u32x2 = u32x2::new(0, 1);
let e: i16x4 = i16x4::new(0, 0, 1, 0);
let r: i16x4 = transmute(vreinterpret_p16_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p8_p16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i8x16 = i8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let r: i8x16 = transmute(vreinterpretq_p8_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p8_s16() {
let a: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i8x16 = i8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let r: i8x16 = transmute(vreinterpretq_p8_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p8_u16() {
let a: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let e: i8x16 = i8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let r: i8x16 = transmute(vreinterpretq_p8_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p16_s32() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let e: i16x8 = i16x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let r: i16x8 = transmute(vreinterpretq_p16_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p16_u32() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let e: i16x8 = i16x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let r: i16x8 = transmute(vreinterpretq_p16_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s32_p64() {
let a: i64x1 = i64x1::new(0);
let e: i32x2 = i32x2::new(0, 0);
let r: i32x2 = transmute(vreinterpret_s32_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u32_p64() {
let a: i64x1 = i64x1::new(0);
let e: u32x2 = u32x2::new(0, 0);
let r: u32x2 = transmute(vreinterpret_u32_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s32_p64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i32x4 = i32x4::new(0, 0, 1, 0);
let r: i32x4 = transmute(vreinterpretq_s32_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u32_p64() {
let a: i64x2 = i64x2::new(0, 1);
let e: u32x4 = u32x4::new(0, 0, 1, 0);
let r: u32x4 = transmute(vreinterpretq_u32_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s64_p128() {
let a: p128 = 0;
let e: i64x2 = i64x2::new(0, 0);
let r: i64x2 = transmute(vreinterpretq_s64_p128(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u64_p128() {
let a: p128 = 0;
let e: u64x2 = u64x2::new(0, 0);
let r: u64x2 = transmute(vreinterpretq_u64_p128(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p64_p128() {
let a: p128 = 0;
let e: i64x2 = i64x2::new(0, 0);
let r: i64x2 = transmute(vreinterpretq_p64_p128(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s16_p8() {
let a: i8x8 = i8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let e: i16x4 = i16x4::new(0, 1, 2, 3);
let r: i16x4 = transmute(vreinterpret_s16_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s16_s8() {
let a: i8x8 = i8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let e: i16x4 = i16x4::new(0, 1, 2, 3);
let r: i16x4 = transmute(vreinterpret_s16_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s16_u8() {
let a: u8x8 = u8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let e: i16x4 = i16x4::new(0, 1, 2, 3);
let r: i16x4 = transmute(vreinterpret_s16_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s32_p16() {
let a: i16x4 = i16x4::new(0, 0, 1, 0);
let e: i32x2 = i32x2::new(0, 1);
let r: i32x2 = transmute(vreinterpret_s32_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s32_s16() {
let a: i16x4 = i16x4::new(0, 0, 1, 0);
let e: i32x2 = i32x2::new(0, 1);
let r: i32x2 = transmute(vreinterpret_s32_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s32_u16() {
let a: u16x4 = u16x4::new(0, 0, 1, 0);
let e: i32x2 = i32x2::new(0, 1);
let r: i32x2 = transmute(vreinterpret_s32_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s64_s32() {
let a: i32x2 = i32x2::new(0, 0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_s64_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s64_u32() {
let a: u32x2 = u32x2::new(0, 0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_s64_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s16_p8() {
let a: i8x16 = i8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i16x8 = transmute(vreinterpretq_s16_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s16_s8() {
let a: i8x16 = i8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i16x8 = transmute(vreinterpretq_s16_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s16_u8() {
let a: u8x16 = u8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i16x8 = transmute(vreinterpretq_s16_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s32_p16() {
let a: i16x8 = i16x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let e: i32x4 = i32x4::new(0, 1, 2, 3);
let r: i32x4 = transmute(vreinterpretq_s32_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s32_s16() {
let a: i16x8 = i16x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let e: i32x4 = i32x4::new(0, 1, 2, 3);
let r: i32x4 = transmute(vreinterpretq_s32_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s32_u16() {
let a: u16x8 = u16x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let e: i32x4 = i32x4::new(0, 1, 2, 3);
let r: i32x4 = transmute(vreinterpretq_s32_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s64_s32() {
let a: i32x4 = i32x4::new(0, 0, 1, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_s64_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s64_u32() {
let a: u32x4 = u32x4::new(0, 0, 1, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_s64_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u16_p8() {
let a: i8x8 = i8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let e: u16x4 = u16x4::new(0, 1, 2, 3);
let r: u16x4 = transmute(vreinterpret_u16_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u16_s8() {
let a: i8x8 = i8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let e: u16x4 = u16x4::new(0, 1, 2, 3);
let r: u16x4 = transmute(vreinterpret_u16_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u16_u8() {
let a: u8x8 = u8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let e: u16x4 = u16x4::new(0, 1, 2, 3);
let r: u16x4 = transmute(vreinterpret_u16_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u32_p16() {
let a: i16x4 = i16x4::new(0, 0, 1, 0);
let e: u32x2 = u32x2::new(0, 1);
let r: u32x2 = transmute(vreinterpret_u32_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u32_s16() {
let a: i16x4 = i16x4::new(0, 0, 1, 0);
let e: u32x2 = u32x2::new(0, 1);
let r: u32x2 = transmute(vreinterpret_u32_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u32_u16() {
let a: u16x4 = u16x4::new(0, 0, 1, 0);
let e: u32x2 = u32x2::new(0, 1);
let r: u32x2 = transmute(vreinterpret_u32_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u64_s32() {
let a: i32x2 = i32x2::new(0, 0);
let e: u64x1 = u64x1::new(0);
let r: u64x1 = transmute(vreinterpret_u64_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u64_u32() {
let a: u32x2 = u32x2::new(0, 0);
let e: u64x1 = u64x1::new(0);
let r: u64x1 = transmute(vreinterpret_u64_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u16_p8() {
let a: i8x16 = i8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let e: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u16x8 = transmute(vreinterpretq_u16_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u16_s8() {
let a: i8x16 = i8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let e: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u16x8 = transmute(vreinterpretq_u16_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u16_u8() {
let a: u8x16 = u8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let e: u16x8 = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: u16x8 = transmute(vreinterpretq_u16_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u32_p16() {
let a: i16x8 = i16x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let e: u32x4 = u32x4::new(0, 1, 2, 3);
let r: u32x4 = transmute(vreinterpretq_u32_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u32_s16() {
let a: i16x8 = i16x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let e: u32x4 = u32x4::new(0, 1, 2, 3);
let r: u32x4 = transmute(vreinterpretq_u32_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u32_u16() {
let a: u16x8 = u16x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let e: u32x4 = u32x4::new(0, 1, 2, 3);
let r: u32x4 = transmute(vreinterpretq_u32_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u64_s32() {
let a: i32x4 = i32x4::new(0, 0, 1, 0);
let e: u64x2 = u64x2::new(0, 1);
let r: u64x2 = transmute(vreinterpretq_u64_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u64_u32() {
let a: u32x4 = u32x4::new(0, 0, 1, 0);
let e: u64x2 = u64x2::new(0, 1);
let r: u64x2 = transmute(vreinterpretq_u64_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p16_p8() {
let a: i8x8 = i8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let e: i16x4 = i16x4::new(0, 1, 2, 3);
let r: i16x4 = transmute(vreinterpret_p16_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p16_s8() {
let a: i8x8 = i8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let e: i16x4 = i16x4::new(0, 1, 2, 3);
let r: i16x4 = transmute(vreinterpret_p16_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p16_u8() {
let a: u8x8 = u8x8::new(0, 0, 1, 0, 2, 0, 3, 0);
let e: i16x4 = i16x4::new(0, 1, 2, 3);
let r: i16x4 = transmute(vreinterpret_p16_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p16_p8() {
let a: i8x16 = i8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i16x8 = transmute(vreinterpretq_p16_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p16_s8() {
let a: i8x16 = i8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i16x8 = transmute(vreinterpretq_p16_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p16_u8() {
let a: u8x16 = u8x16::new(0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0);
let e: i16x8 = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
let r: i16x8 = transmute(vreinterpretq_p16_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p64_s32() {
let a: i32x2 = i32x2::new(0, 0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_p64_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p64_u32() {
let a: u32x2 = u32x2::new(0, 0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_p64_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p64_s32() {
let a: i32x4 = i32x4::new(0, 0, 1, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_p64_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p64_u32() {
let a: u32x4 = u32x4::new(0, 0, 1, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_p64_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p128_s64() {
let a: i64x2 = i64x2::new(0, 0);
let e: p128 = 0;
let r: p128 = transmute(vreinterpretq_p128_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p128_u64() {
let a: u64x2 = u64x2::new(0, 0);
let e: p128 = 0;
let r: p128 = transmute(vreinterpretq_p128_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p128_p64() {
let a: i64x2 = i64x2::new(0, 0);
let e: p128 = 0;
let r: p128 = transmute(vreinterpretq_p128_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s8_s32() {
let a: i32x2 = i32x2::new(0, 1);
let e: i8x8 = i8x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let r: i8x8 = transmute(vreinterpret_s8_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s8_u32() {
let a: u32x2 = u32x2::new(0, 1);
let e: i8x8 = i8x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let r: i8x8 = transmute(vreinterpret_s8_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s16_s64() {
let a: i64x1 = i64x1::new(0);
let e: i16x4 = i16x4::new(0, 0, 0, 0);
let r: i16x4 = transmute(vreinterpret_s16_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s16_u64() {
let a: u64x1 = u64x1::new(0);
let e: i16x4 = i16x4::new(0, 0, 0, 0);
let r: i16x4 = transmute(vreinterpret_s16_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s8_s32() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let e: i8x16 = i8x16::new(0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0);
let r: i8x16 = transmute(vreinterpretq_s8_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s8_u32() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let e: i8x16 = i8x16::new(0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0);
let r: i8x16 = transmute(vreinterpretq_s8_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s16_s64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i16x8 = i16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let r: i16x8 = transmute(vreinterpretq_s16_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s16_u64() {
let a: u64x2 = u64x2::new(0, 1);
let e: i16x8 = i16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let r: i16x8 = transmute(vreinterpretq_s16_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u8_s32() {
let a: i32x2 = i32x2::new(0, 1);
let e: u8x8 = u8x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let r: u8x8 = transmute(vreinterpret_u8_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u8_u32() {
let a: u32x2 = u32x2::new(0, 1);
let e: u8x8 = u8x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let r: u8x8 = transmute(vreinterpret_u8_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u16_s64() {
let a: i64x1 = i64x1::new(0);
let e: u16x4 = u16x4::new(0, 0, 0, 0);
let r: u16x4 = transmute(vreinterpret_u16_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u16_u64() {
let a: u64x1 = u64x1::new(0);
let e: u16x4 = u16x4::new(0, 0, 0, 0);
let r: u16x4 = transmute(vreinterpret_u16_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u8_s32() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let e: u8x16 = u8x16::new(0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0);
let r: u8x16 = transmute(vreinterpretq_u8_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u8_u32() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let e: u8x16 = u8x16::new(0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0);
let r: u8x16 = transmute(vreinterpretq_u8_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u16_s64() {
let a: i64x2 = i64x2::new(0, 1);
let e: u16x8 = u16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let r: u16x8 = transmute(vreinterpretq_u16_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u16_u64() {
let a: u64x2 = u64x2::new(0, 1);
let e: u16x8 = u16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let r: u16x8 = transmute(vreinterpretq_u16_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p8_s32() {
let a: i32x2 = i32x2::new(0, 1);
let e: i8x8 = i8x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let r: i8x8 = transmute(vreinterpret_p8_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p8_u32() {
let a: u32x2 = u32x2::new(0, 1);
let e: i8x8 = i8x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let r: i8x8 = transmute(vreinterpret_p8_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p16_s64() {
let a: i64x1 = i64x1::new(0);
let e: i16x4 = i16x4::new(0, 0, 0, 0);
let r: i16x4 = transmute(vreinterpret_p16_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p16_u64() {
let a: u64x1 = u64x1::new(0);
let e: i16x4 = i16x4::new(0, 0, 0, 0);
let r: i16x4 = transmute(vreinterpret_p16_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p8_s32() {
let a: i32x4 = i32x4::new(0, 1, 2, 3);
let e: i8x16 = i8x16::new(0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0);
let r: i8x16 = transmute(vreinterpretq_p8_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p8_u32() {
let a: u32x4 = u32x4::new(0, 1, 2, 3);
let e: i8x16 = i8x16::new(0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0);
let r: i8x16 = transmute(vreinterpretq_p8_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p16_s64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i16x8 = i16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let r: i16x8 = transmute(vreinterpretq_p16_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p16_u64() {
let a: u64x2 = u64x2::new(0, 1);
let e: i16x8 = i16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let r: i16x8 = transmute(vreinterpretq_p16_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s16_p64() {
let a: i64x1 = i64x1::new(0);
let e: i16x4 = i16x4::new(0, 0, 0, 0);
let r: i16x4 = transmute(vreinterpret_s16_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u16_p64() {
let a: i64x1 = i64x1::new(0);
let e: u16x4 = u16x4::new(0, 0, 0, 0);
let r: u16x4 = transmute(vreinterpret_u16_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p16_p64() {
let a: i64x1 = i64x1::new(0);
let e: i16x4 = i16x4::new(0, 0, 0, 0);
let r: i16x4 = transmute(vreinterpret_p16_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s16_p64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i16x8 = i16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let r: i16x8 = transmute(vreinterpretq_s16_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u16_p64() {
let a: i64x2 = i64x2::new(0, 1);
let e: u16x8 = u16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let r: u16x8 = transmute(vreinterpretq_u16_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p16_p64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i16x8 = i16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let r: i16x8 = transmute(vreinterpretq_p16_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s32_p128() {
let a: p128 = 0;
let e: i32x4 = i32x4::new(0, 0, 0, 0);
let r: i32x4 = transmute(vreinterpretq_s32_p128(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u32_p128() {
let a: p128 = 0;
let e: u32x4 = u32x4::new(0, 0, 0, 0);
let r: u32x4 = transmute(vreinterpretq_u32_p128(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s32_p8() {
let a: i8x8 = i8x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: i32x2 = i32x2::new(0, 1);
let r: i32x2 = transmute(vreinterpret_s32_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s32_s8() {
let a: i8x8 = i8x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: i32x2 = i32x2::new(0, 1);
let r: i32x2 = transmute(vreinterpret_s32_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s32_u8() {
let a: u8x8 = u8x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: i32x2 = i32x2::new(0, 1);
let r: i32x2 = transmute(vreinterpret_s32_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s64_p16() {
let a: i16x4 = i16x4::new(0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_s64_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s64_s16() {
let a: i16x4 = i16x4::new(0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_s64_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s64_u16() {
let a: u16x4 = u16x4::new(0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_s64_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s32_p8() {
let a: i8x16 = i8x16::new(0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0);
let e: i32x4 = i32x4::new(0, 1, 2, 3);
let r: i32x4 = transmute(vreinterpretq_s32_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s32_s8() {
let a: i8x16 = i8x16::new(0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0);
let e: i32x4 = i32x4::new(0, 1, 2, 3);
let r: i32x4 = transmute(vreinterpretq_s32_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s32_u8() {
let a: u8x16 = u8x16::new(0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0);
let e: i32x4 = i32x4::new(0, 1, 2, 3);
let r: i32x4 = transmute(vreinterpretq_s32_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s64_p16() {
let a: i16x8 = i16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_s64_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s64_s16() {
let a: i16x8 = i16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_s64_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s64_u16() {
let a: u16x8 = u16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_s64_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u32_p8() {
let a: i8x8 = i8x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: u32x2 = u32x2::new(0, 1);
let r: u32x2 = transmute(vreinterpret_u32_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u32_s8() {
let a: i8x8 = i8x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: u32x2 = u32x2::new(0, 1);
let r: u32x2 = transmute(vreinterpret_u32_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u32_u8() {
let a: u8x8 = u8x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: u32x2 = u32x2::new(0, 1);
let r: u32x2 = transmute(vreinterpret_u32_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u64_p16() {
let a: i16x4 = i16x4::new(0, 0, 0, 0);
let e: u64x1 = u64x1::new(0);
let r: u64x1 = transmute(vreinterpret_u64_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u64_s16() {
let a: i16x4 = i16x4::new(0, 0, 0, 0);
let e: u64x1 = u64x1::new(0);
let r: u64x1 = transmute(vreinterpret_u64_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u64_u16() {
let a: u16x4 = u16x4::new(0, 0, 0, 0);
let e: u64x1 = u64x1::new(0);
let r: u64x1 = transmute(vreinterpret_u64_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u32_p8() {
let a: i8x16 = i8x16::new(0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0);
let e: u32x4 = u32x4::new(0, 1, 2, 3);
let r: u32x4 = transmute(vreinterpretq_u32_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u32_s8() {
let a: i8x16 = i8x16::new(0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0);
let e: u32x4 = u32x4::new(0, 1, 2, 3);
let r: u32x4 = transmute(vreinterpretq_u32_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u32_u8() {
let a: u8x16 = u8x16::new(0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0);
let e: u32x4 = u32x4::new(0, 1, 2, 3);
let r: u32x4 = transmute(vreinterpretq_u32_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u64_p16() {
let a: i16x8 = i16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: u64x2 = u64x2::new(0, 1);
let r: u64x2 = transmute(vreinterpretq_u64_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u64_s16() {
let a: i16x8 = i16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: u64x2 = u64x2::new(0, 1);
let r: u64x2 = transmute(vreinterpretq_u64_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u64_u16() {
let a: u16x8 = u16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: u64x2 = u64x2::new(0, 1);
let r: u64x2 = transmute(vreinterpretq_u64_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p64_p16() {
let a: i16x4 = i16x4::new(0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_p64_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p64_s16() {
let a: i16x4 = i16x4::new(0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_p64_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p64_u16() {
let a: u16x4 = u16x4::new(0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_p64_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p64_p16() {
let a: i16x8 = i16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_p64_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p64_s16() {
let a: i16x8 = i16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_p64_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p64_u16() {
let a: u16x8 = u16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_p64_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p128_s32() {
let a: i32x4 = i32x4::new(0, 0, 0, 0);
let e: p128 = 0;
let r: p128 = transmute(vreinterpretq_p128_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p128_u32() {
let a: u32x4 = u32x4::new(0, 0, 0, 0);
let e: p128 = 0;
let r: p128 = transmute(vreinterpretq_p128_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s8_s64() {
let a: i64x1 = i64x1::new(0);
let e: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: i8x8 = transmute(vreinterpret_s8_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s8_u64() {
let a: u64x1 = u64x1::new(0);
let e: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: i8x8 = transmute(vreinterpret_s8_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u8_s64() {
let a: i64x1 = i64x1::new(0);
let e: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: u8x8 = transmute(vreinterpret_u8_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u8_u64() {
let a: u64x1 = u64x1::new(0);
let e: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: u8x8 = transmute(vreinterpret_u8_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p8_s64() {
let a: i64x1 = i64x1::new(0);
let e: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: i8x8 = transmute(vreinterpret_p8_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p8_u64() {
let a: u64x1 = u64x1::new(0);
let e: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: i8x8 = transmute(vreinterpret_p8_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s8_s64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let r: i8x16 = transmute(vreinterpretq_s8_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s8_u64() {
let a: u64x2 = u64x2::new(0, 1);
let e: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let r: i8x16 = transmute(vreinterpretq_s8_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u8_s64() {
let a: i64x2 = i64x2::new(0, 1);
let e: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let r: u8x16 = transmute(vreinterpretq_u8_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u8_u64() {
let a: u64x2 = u64x2::new(0, 1);
let e: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let r: u8x16 = transmute(vreinterpretq_u8_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p8_s64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let r: i8x16 = transmute(vreinterpretq_p8_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p8_u64() {
let a: u64x2 = u64x2::new(0, 1);
let e: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let r: i8x16 = transmute(vreinterpretq_p8_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s8_p64() {
let a: i64x1 = i64x1::new(0);
let e: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: i8x8 = transmute(vreinterpret_s8_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u8_p64() {
let a: i64x1 = i64x1::new(0);
let e: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: u8x8 = transmute(vreinterpret_u8_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p8_p64() {
let a: i64x1 = i64x1::new(0);
let e: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: i8x8 = transmute(vreinterpret_p8_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s8_p64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let r: i8x16 = transmute(vreinterpretq_s8_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u8_p64() {
let a: i64x2 = i64x2::new(0, 1);
let e: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let r: u8x16 = transmute(vreinterpretq_u8_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p8_p64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let r: i8x16 = transmute(vreinterpretq_p8_p64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s16_p128() {
let a: p128 = 0;
let e: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: i16x8 = transmute(vreinterpretq_s16_p128(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u16_p128() {
let a: p128 = 0;
let e: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: u16x8 = transmute(vreinterpretq_u16_p128(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p16_p128() {
let a: p128 = 0;
let e: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: i16x8 = transmute(vreinterpretq_p16_p128(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s64_p8() {
let a: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_s64_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s64_s8() {
let a: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_s64_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s64_u8() {
let a: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_s64_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u64_p8() {
let a: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: u64x1 = u64x1::new(0);
let r: u64x1 = transmute(vreinterpret_u64_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u64_s8() {
let a: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: u64x1 = u64x1::new(0);
let r: u64x1 = transmute(vreinterpret_u64_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u64_u8() {
let a: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: u64x1 = u64x1::new(0);
let r: u64x1 = transmute(vreinterpret_u64_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s64_p8() {
let a: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_s64_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s64_s8() {
let a: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_s64_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s64_u8() {
let a: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_s64_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u64_p8() {
let a: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let e: u64x2 = u64x2::new(0, 1);
let r: u64x2 = transmute(vreinterpretq_u64_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u64_s8() {
let a: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let e: u64x2 = u64x2::new(0, 1);
let r: u64x2 = transmute(vreinterpretq_u64_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u64_u8() {
let a: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let e: u64x2 = u64x2::new(0, 1);
let r: u64x2 = transmute(vreinterpretq_u64_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p64_p8() {
let a: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_p64_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p64_s8() {
let a: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_p64_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p64_u8() {
let a: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_p64_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p64_p8() {
let a: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_p64_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p64_s8() {
let a: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_p64_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p64_u8() {
let a: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
let r: i64x2 = transmute(vreinterpretq_p64_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p128_s16() {
let a: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: p128 = 0;
let r: p128 = transmute(vreinterpretq_p128_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p128_u16() {
let a: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: p128 = 0;
let r: p128 = transmute(vreinterpretq_p128_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p128_p16() {
let a: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: p128 = 0;
let r: p128 = transmute(vreinterpretq_p128_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p128_s8() {
let a: i8x16 = i8x16::new(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
let e: p128 = 1;
let r: p128 = transmute(vreinterpretq_p128_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p128_u8() {
let a: u8x16 = u8x16::new(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
let e: p128 = 1;
let r: p128 = transmute(vreinterpretq_p128_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p128_p8() {
let a: i8x16 = i8x16::new(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
let e: p128 = 1;
let r: p128 = transmute(vreinterpretq_p128_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s8_p128() {
let a: p128 = 1;
let e: i8x16 = i8x16::new(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
let r: i8x16 = transmute(vreinterpretq_s8_p128(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u8_p128() {
let a: p128 = 1;
let e: u8x16 = u8x16::new(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
let r: u8x16 = transmute(vreinterpretq_u8_p128(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p8_p128() {
let a: p128 = 1;
let e: i8x16 = i8x16::new(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
let r: i8x16 = transmute(vreinterpretq_p8_p128(transmute(a)));
assert_eq!(r, e);
}
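// The reinterpretations involving `f32` below use all-zero inputs: 0.0_f32
// has an all-zero bit pattern, so every expected vector is zero in every lane
// regardless of the element type on the other side of the cast.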
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s8_f32() {
let a: f32x2 = f32x2::new(0., 0.);
let e: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: i8x8 = transmute(vreinterpret_s8_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s16_f32() {
let a: f32x2 = f32x2::new(0., 0.);
let e: i16x4 = i16x4::new(0, 0, 0, 0);
let r: i16x4 = transmute(vreinterpret_s16_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s32_f32() {
let a: f32x2 = f32x2::new(0., 0.);
let e: i32x2 = i32x2::new(0, 0);
let r: i32x2 = transmute(vreinterpret_s32_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_s64_f32() {
let a: f32x2 = f32x2::new(0., 0.);
let e: i64x1 = i64x1::new(0);
let r: i64x1 = transmute(vreinterpret_s64_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s8_f32() {
let a: f32x4 = f32x4::new(0., 0., 0., 0.);
let e: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
let r: i8x16 = transmute(vreinterpretq_s8_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s16_f32() {
let a: f32x4 = f32x4::new(0., 0., 0., 0.);
let e: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: i16x8 = transmute(vreinterpretq_s16_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s32_f32() {
let a: f32x4 = f32x4::new(0., 0., 0., 0.);
let e: i32x4 = i32x4::new(0, 0, 0, 0);
let r: i32x4 = transmute(vreinterpretq_s32_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_s64_f32() {
let a: f32x4 = f32x4::new(0., 0., 0., 0.);
let e: i64x2 = i64x2::new(0, 0);
let r: i64x2 = transmute(vreinterpretq_s64_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u8_f32() {
let a: f32x2 = f32x2::new(0., 0.);
let e: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: u8x8 = transmute(vreinterpret_u8_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u16_f32() {
let a: f32x2 = f32x2::new(0., 0.);
let e: u16x4 = u16x4::new(0, 0, 0, 0);
let r: u16x4 = transmute(vreinterpret_u16_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u32_f32() {
let a: f32x2 = f32x2::new(0., 0.);
let e: u32x2 = u32x2::new(0, 0);
let r: u32x2 = transmute(vreinterpret_u32_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_u64_f32() {
let a: f32x2 = f32x2::new(0., 0.);
let e: u64x1 = u64x1::new(0);
let r: u64x1 = transmute(vreinterpret_u64_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u8_f32() {
let a: f32x4 = f32x4::new(0., 0., 0., 0.);
let e: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
let r: u8x16 = transmute(vreinterpretq_u8_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u16_f32() {
let a: f32x4 = f32x4::new(0., 0., 0., 0.);
let e: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: u16x8 = transmute(vreinterpretq_u16_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u32_f32() {
let a: f32x4 = f32x4::new(0., 0., 0., 0.);
let e: u32x4 = u32x4::new(0, 0, 0, 0);
let r: u32x4 = transmute(vreinterpretq_u32_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_u64_f32() {
let a: f32x4 = f32x4::new(0., 0., 0., 0.);
let e: u64x2 = u64x2::new(0, 0);
let r: u64x2 = transmute(vreinterpretq_u64_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p8_f32() {
let a: f32x2 = f32x2::new(0., 0.);
let e: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: i8x8 = transmute(vreinterpret_p8_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_p16_f32() {
let a: f32x2 = f32x2::new(0., 0.);
let e: i16x4 = i16x4::new(0, 0, 0, 0);
let r: i16x4 = transmute(vreinterpret_p16_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p8_f32() {
let a: f32x4 = f32x4::new(0., 0., 0., 0.);
let e: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
let r: i8x16 = transmute(vreinterpretq_p8_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p16_f32() {
let a: f32x4 = f32x4::new(0., 0., 0., 0.);
let e: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: i16x8 = transmute(vreinterpretq_p16_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_p128_f32() {
let a: f32x4 = f32x4::new(0., 0., 0., 0.);
let e: p128 = 0;
let r: p128 = transmute(vreinterpretq_p128_f32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_f32_s8() {
let a: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: f32x2 = f32x2::new(0., 0.);
let r: f32x2 = transmute(vreinterpret_f32_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_f32_s16() {
let a: i16x4 = i16x4::new(0, 0, 0, 0);
let e: f32x2 = f32x2::new(0., 0.);
let r: f32x2 = transmute(vreinterpret_f32_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_f32_s32() {
let a: i32x2 = i32x2::new(0, 0);
let e: f32x2 = f32x2::new(0., 0.);
let r: f32x2 = transmute(vreinterpret_f32_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_f32_s64() {
let a: i64x1 = i64x1::new(0);
let e: f32x2 = f32x2::new(0., 0.);
let r: f32x2 = transmute(vreinterpret_f32_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_f32_s8() {
let a: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
let e: f32x4 = f32x4::new(0., 0., 0., 0.);
let r: f32x4 = transmute(vreinterpretq_f32_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_f32_s16() {
let a: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: f32x4 = f32x4::new(0., 0., 0., 0.);
let r: f32x4 = transmute(vreinterpretq_f32_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_f32_s32() {
let a: i32x4 = i32x4::new(0, 0, 0, 0);
let e: f32x4 = f32x4::new(0., 0., 0., 0.);
let r: f32x4 = transmute(vreinterpretq_f32_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_f32_s64() {
let a: i64x2 = i64x2::new(0, 0);
let e: f32x4 = f32x4::new(0., 0., 0., 0.);
let r: f32x4 = transmute(vreinterpretq_f32_s64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_f32_u8() {
let a: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: f32x2 = f32x2::new(0., 0.);
let r: f32x2 = transmute(vreinterpret_f32_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_f32_u16() {
let a: u16x4 = u16x4::new(0, 0, 0, 0);
let e: f32x2 = f32x2::new(0., 0.);
let r: f32x2 = transmute(vreinterpret_f32_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_f32_u32() {
let a: u32x2 = u32x2::new(0, 0);
let e: f32x2 = f32x2::new(0., 0.);
let r: f32x2 = transmute(vreinterpret_f32_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_f32_u64() {
let a: u64x1 = u64x1::new(0);
let e: f32x2 = f32x2::new(0., 0.);
let r: f32x2 = transmute(vreinterpret_f32_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_f32_u8() {
let a: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
let e: f32x4 = f32x4::new(0., 0., 0., 0.);
let r: f32x4 = transmute(vreinterpretq_f32_u8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_f32_u16() {
let a: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: f32x4 = f32x4::new(0., 0., 0., 0.);
let r: f32x4 = transmute(vreinterpretq_f32_u16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_f32_u32() {
let a: u32x4 = u32x4::new(0, 0, 0, 0);
let e: f32x4 = f32x4::new(0., 0., 0., 0.);
let r: f32x4 = transmute(vreinterpretq_f32_u32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_f32_u64() {
let a: u64x2 = u64x2::new(0, 0);
let e: f32x4 = f32x4::new(0., 0., 0., 0.);
let r: f32x4 = transmute(vreinterpretq_f32_u64(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_f32_p8() {
let a: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: f32x2 = f32x2::new(0., 0.);
let r: f32x2 = transmute(vreinterpret_f32_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpret_f32_p16() {
let a: i16x4 = i16x4::new(0, 0, 0, 0);
let e: f32x2 = f32x2::new(0., 0.);
let r: f32x2 = transmute(vreinterpret_f32_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_f32_p8() {
let a: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
let e: f32x4 = f32x4::new(0., 0., 0., 0.);
let r: f32x4 = transmute(vreinterpretq_f32_p8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_f32_p16() {
let a: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: f32x4 = f32x4::new(0., 0., 0., 0.);
let r: f32x4 = transmute(vreinterpretq_f32_p16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vreinterpretq_f32_p128() {
let a: p128 = 0;
let e: f32x4 = f32x4::new(0., 0., 0., 0.);
let r: f32x4 = transmute(vreinterpretq_f32_p128(transmute(a)));
assert_eq!(r, e);
}
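// `vrshl` / `vrshlq`: rounding shift left, with a signed per-lane shift count
// taken from `b`. Positive counts shift left (where no rounding is needed);
// negative counts shift right with rounding. These tests only exercise the
// positive-count path, e.g. 1 << 2 == 4.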
#[simd_test(enable = "neon")]
unsafe fn test_vrshl_s8() {
let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i8x8 = i8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: i8x8 = i8x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let r: i8x8 = transmute(vrshl_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshlq_s8() {
let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let e: i8x16 = i8x16::new(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64);
let r: i8x16 = transmute(vrshlq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshl_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let e: i16x4 = i16x4::new(4, 8, 12, 16);
let r: i16x4 = transmute(vrshl_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshlq_s16() {
let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: i16x8 = i16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let r: i16x8 = transmute(vrshlq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshl_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x2 = i32x2::new(2, 2);
let e: i32x2 = i32x2::new(4, 8);
let r: i32x2 = transmute(vrshl_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshlq_s32() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let e: i32x4 = i32x4::new(4, 8, 12, 16);
let r: i32x4 = transmute(vrshlq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshl_s64() {
let a: i64x1 = i64x1::new(1);
let b: i64x1 = i64x1::new(2);
let e: i64x1 = i64x1::new(4);
let r: i64x1 = transmute(vrshl_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshlq_s64() {
let a: i64x2 = i64x2::new(1, 2);
let b: i64x2 = i64x2::new(2, 2);
let e: i64x2 = i64x2::new(4, 8);
let r: i64x2 = transmute(vrshlq_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
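// The unsigned `vrshl` variants still take a *signed* shift-count vector
// (`i8x8`, `i16x4`, ...); only the data lanes being shifted are unsigned.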
#[simd_test(enable = "neon")]
unsafe fn test_vrshl_u8() {
let a: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i8x8 = i8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: u8x8 = u8x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let r: u8x8 = transmute(vrshl_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshlq_u8() {
let a: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let e: u8x16 = u8x16::new(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64);
let r: u8x16 = transmute(vrshlq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshl_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let e: u16x4 = u16x4::new(4, 8, 12, 16);
let r: u16x4 = transmute(vrshl_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshlq_u16() {
let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: u16x8 = u16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let r: u16x8 = transmute(vrshlq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshl_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: i32x2 = i32x2::new(2, 2);
let e: u32x2 = u32x2::new(4, 8);
let r: u32x2 = transmute(vrshl_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshlq_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let e: u32x4 = u32x4::new(4, 8, 12, 16);
let r: u32x4 = transmute(vrshlq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshl_u64() {
let a: u64x1 = u64x1::new(1);
let b: i64x1 = i64x1::new(2);
let e: u64x1 = u64x1::new(4);
let r: u64x1 = transmute(vrshl_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshlq_u64() {
let a: u64x2 = u64x2::new(1, 2);
let b: i64x2 = i64x2::new(2, 2);
let e: u64x2 = u64x2::new(4, 8);
let r: u64x2 = transmute(vrshlq_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
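// `vrshr_n` / `vrshrq_n`: rounding shift right by an immediate `N` in
// 1..=lane-width. Each lane computes (x + (1 << (N - 1))) >> N, rounding to
// nearest instead of truncating. The inputs below are exact multiples of
// 1 << N, so the rounding bias does not change the quotient: (4 + 2) >> 2 == 1.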
#[simd_test(enable = "neon")]
unsafe fn test_vrshr_n_s8() {
let a: i8x8 = i8x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: i8x8 = transmute(vrshr_n_s8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshrq_n_s8() {
let a: i8x16 = i8x16::new(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64);
let e: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let r: i8x16 = transmute(vrshrq_n_s8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshr_n_s16() {
let a: i16x4 = i16x4::new(4, 8, 12, 16);
let e: i16x4 = i16x4::new(1, 2, 3, 4);
let r: i16x4 = transmute(vrshr_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshrq_n_s16() {
let a: i16x8 = i16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: i16x8 = transmute(vrshrq_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshr_n_s32() {
let a: i32x2 = i32x2::new(4, 8);
let e: i32x2 = i32x2::new(1, 2);
let r: i32x2 = transmute(vrshr_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshrq_n_s32() {
let a: i32x4 = i32x4::new(4, 8, 12, 16);
let e: i32x4 = i32x4::new(1, 2, 3, 4);
let r: i32x4 = transmute(vrshrq_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshr_n_s64() {
let a: i64x1 = i64x1::new(4);
let e: i64x1 = i64x1::new(1);
let r: i64x1 = transmute(vrshr_n_s64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshrq_n_s64() {
let a: i64x2 = i64x2::new(4, 8);
let e: i64x2 = i64x2::new(1, 2);
let r: i64x2 = transmute(vrshrq_n_s64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshr_n_u8() {
let a: u8x8 = u8x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: u8x8 = transmute(vrshr_n_u8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshrq_n_u8() {
let a: u8x16 = u8x16::new(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64);
let e: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let r: u8x16 = transmute(vrshrq_n_u8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshr_n_u16() {
let a: u16x4 = u16x4::new(4, 8, 12, 16);
let e: u16x4 = u16x4::new(1, 2, 3, 4);
let r: u16x4 = transmute(vrshr_n_u16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshrq_n_u16() {
let a: u16x8 = u16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: u16x8 = transmute(vrshrq_n_u16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshr_n_u32() {
let a: u32x2 = u32x2::new(4, 8);
let e: u32x2 = u32x2::new(1, 2);
let r: u32x2 = transmute(vrshr_n_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshrq_n_u32() {
let a: u32x4 = u32x4::new(4, 8, 12, 16);
let e: u32x4 = u32x4::new(1, 2, 3, 4);
let r: u32x4 = transmute(vrshrq_n_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshr_n_u64() {
let a: u64x1 = u64x1::new(4);
let e: u64x1 = u64x1::new(1);
let r: u64x1 = transmute(vrshr_n_u64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshrq_n_u64() {
let a: u64x2 = u64x2::new(4, 8);
let e: u64x2 = u64x2::new(1, 2);
let r: u64x2 = transmute(vrshrq_n_u64::<2>(transmute(a)));
assert_eq!(r, e);
}
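// `vrshrn_n`: rounding shift right by an immediate, then narrow each lane to
// half its width (i16x8 -> i8x8, i32x4 -> i16x4, ...), keeping the low half
// of the rounded, shifted value.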
#[simd_test(enable = "neon")]
unsafe fn test_vrshrn_n_s16() {
let a: i16x8 = i16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: i8x8 = transmute(vrshrn_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshrn_n_s32() {
let a: i32x4 = i32x4::new(4, 8, 12, 16);
let e: i16x4 = i16x4::new(1, 2, 3, 4);
let r: i16x4 = transmute(vrshrn_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshrn_n_s64() {
let a: i64x2 = i64x2::new(4, 8);
let e: i32x2 = i32x2::new(1, 2);
let r: i32x2 = transmute(vrshrn_n_s64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshrn_n_u16() {
let a: u16x8 = u16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: u8x8 = transmute(vrshrn_n_u16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshrn_n_u32() {
let a: u32x4 = u32x4::new(4, 8, 12, 16);
let e: u16x4 = u16x4::new(1, 2, 3, 4);
let r: u16x4 = transmute(vrshrn_n_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrshrn_n_u64() {
let a: u64x2 = u64x2::new(4, 8);
let e: u32x2 = u32x2::new(1, 2);
let r: u32x2 = transmute(vrshrn_n_u64::<2>(transmute(a)));
assert_eq!(r, e);
}
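// `vrsra_n` / `vrsraq_n`: rounding shift right and accumulate. `b` is shifted
// right by the immediate with rounding and added to the accumulator `a`, i.e.
// a + ((b + (1 << (N - 1))) >> N). With a == 1 and b a multiple of 4,
// shifting by 2 yields b / 4 + 1.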
#[simd_test(enable = "neon")]
unsafe fn test_vrsra_n_s8() {
let a: i8x8 = i8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: i8x8 = i8x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: i8x8 = i8x8::new(2, 3, 4, 5, 6, 7, 8, 9);
let r: i8x8 = transmute(vrsra_n_s8::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsraq_n_s8() {
let a: i8x16 = i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
let b: i8x16 = i8x16::new(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64);
let e: i8x16 = i8x16::new(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17);
let r: i8x16 = transmute(vrsraq_n_s8::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsra_n_s16() {
let a: i16x4 = i16x4::new(1, 1, 1, 1);
let b: i16x4 = i16x4::new(4, 8, 12, 16);
let e: i16x4 = i16x4::new(2, 3, 4, 5);
let r: i16x4 = transmute(vrsra_n_s16::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsraq_n_s16() {
let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: i16x8 = i16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: i16x8 = i16x8::new(2, 3, 4, 5, 6, 7, 8, 9);
let r: i16x8 = transmute(vrsraq_n_s16::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsra_n_s32() {
let a: i32x2 = i32x2::new(1, 1);
let b: i32x2 = i32x2::new(4, 8);
let e: i32x2 = i32x2::new(2, 3);
let r: i32x2 = transmute(vrsra_n_s32::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsraq_n_s32() {
let a: i32x4 = i32x4::new(1, 1, 1, 1);
let b: i32x4 = i32x4::new(4, 8, 12, 16);
let e: i32x4 = i32x4::new(2, 3, 4, 5);
let r: i32x4 = transmute(vrsraq_n_s32::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsra_n_s64() {
let a: i64x1 = i64x1::new(1);
let b: i64x1 = i64x1::new(4);
let e: i64x1 = i64x1::new(2);
let r: i64x1 = transmute(vrsra_n_s64::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsraq_n_s64() {
let a: i64x2 = i64x2::new(1, 1);
let b: i64x2 = i64x2::new(4, 8);
let e: i64x2 = i64x2::new(2, 3);
let r: i64x2 = transmute(vrsraq_n_s64::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsra_n_u8() {
let a: u8x8 = u8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: u8x8 = u8x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: u8x8 = u8x8::new(2, 3, 4, 5, 6, 7, 8, 9);
let r: u8x8 = transmute(vrsra_n_u8::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsraq_n_u8() {
let a: u8x16 = u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
let b: u8x16 = u8x16::new(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64);
let e: u8x16 = u8x16::new(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17);
let r: u8x16 = transmute(vrsraq_n_u8::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsra_n_u16() {
let a: u16x4 = u16x4::new(1, 1, 1, 1);
let b: u16x4 = u16x4::new(4, 8, 12, 16);
let e: u16x4 = u16x4::new(2, 3, 4, 5);
let r: u16x4 = transmute(vrsra_n_u16::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsraq_n_u16() {
let a: u16x8 = u16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: u16x8 = u16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: u16x8 = u16x8::new(2, 3, 4, 5, 6, 7, 8, 9);
let r: u16x8 = transmute(vrsraq_n_u16::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsra_n_u32() {
let a: u32x2 = u32x2::new(1, 1);
let b: u32x2 = u32x2::new(4, 8);
let e: u32x2 = u32x2::new(2, 3);
let r: u32x2 = transmute(vrsra_n_u32::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsraq_n_u32() {
let a: u32x4 = u32x4::new(1, 1, 1, 1);
let b: u32x4 = u32x4::new(4, 8, 12, 16);
let e: u32x4 = u32x4::new(2, 3, 4, 5);
let r: u32x4 = transmute(vrsraq_n_u32::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsra_n_u64() {
let a: u64x1 = u64x1::new(1);
let b: u64x1 = u64x1::new(4);
let e: u64x1 = u64x1::new(2);
let r: u64x1 = transmute(vrsra_n_u64::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsraq_n_u64() {
let a: u64x2 = u64x2::new(1, 1);
let b: u64x2 = u64x2::new(4, 8);
let e: u64x2 = u64x2::new(2, 3);
let r: u64x2 = transmute(vrsraq_n_u64::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
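// Tests for the vrsubhn_* intrinsics (rounding subtract returning high
// narrow). The difference a - b is formed at full width, a rounding bias of
// half the discarded range is added, and only the high half of each lane is
// kept. A rough scalar model of one i16 -> i8 lane (illustrative):
//
//     let high = ((a as i32 - b as i32 + (1 << 7)) >> 8) as i8;
//
// So 0x7F_FF - 1 rounds up into the high half and wraps to -128 below.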
#[simd_test(enable = "neon")]
unsafe fn test_vrsubhn_s16() {
let a: i16x8 = i16x8::new(0x7F_FF, -32768, 0, 4, 5, 6, 7, 8);
let b: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: i8x8 = i8x8::new(-128, -128, 0, 0, 0, 0, 0, 0);
let r: i8x8 = transmute(vrsubhn_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsubhn_s32() {
let a: i32x4 = i32x4::new(0x7F_FF_FF_FF, -2147483648, 0, 4);
let b: i32x4 = i32x4::new(1, 2, 3, 4);
let e: i16x4 = i16x4::new(-32768, -32768, 0, 0);
let r: i16x4 = transmute(vrsubhn_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsubhn_s64() {
let a: i64x2 = i64x2::new(0x7F_FF_FF_FF_FF_FF_FF_FF, -9223372036854775808);
let b: i64x2 = i64x2::new(1, 2);
let e: i32x2 = i32x2::new(-2147483648, -2147483648);
let r: i32x2 = transmute(vrsubhn_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsubhn_u16() {
let a: u16x8 = u16x8::new(0xFF_FF, 0, 3, 4, 5, 6, 7, 8);
let b: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let r: u8x8 = transmute(vrsubhn_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsubhn_u32() {
let a: u32x4 = u32x4::new(0xFF_FF_FF_FF, 0, 3, 4);
let b: u32x4 = u32x4::new(1, 2, 3, 4);
let e: u16x4 = u16x4::new(0, 0, 0, 0);
let r: u16x4 = transmute(vrsubhn_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vrsubhn_u64() {
let a: u64x2 = u64x2::new(0xFF_FF_FF_FF_FF_FF_FF_FF, 0);
let b: u64x2 = u64x2::new(1, 2);
let e: u32x2 = u32x2::new(0, 0);
let r: u32x2 = transmute(vrsubhn_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
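// Tests for the vset_lane_* / vsetq_lane_* intrinsics (insert a scalar into
// one lane). The const generic selects the lane; all other lanes of b pass
// through unchanged. Every test below uses lane 0, so the expectation is
// simply b with its first element replaced by a.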
#[simd_test(enable = "neon")]
unsafe fn test_vset_lane_s8() {
let a: i8 = 1;
let b: i8x8 = i8x8::new(0, 2, 3, 4, 5, 6, 7, 8);
let e: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: i8x8 = transmute(vset_lane_s8::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vset_lane_s16() {
let a: i16 = 1;
let b: i16x4 = i16x4::new(0, 2, 3, 4);
let e: i16x4 = i16x4::new(1, 2, 3, 4);
let r: i16x4 = transmute(vset_lane_s16::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vset_lane_s32() {
let a: i32 = 1;
let b: i32x2 = i32x2::new(0, 2);
let e: i32x2 = i32x2::new(1, 2);
let r: i32x2 = transmute(vset_lane_s32::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vset_lane_s64() {
let a: i64 = 1;
let b: i64x1 = i64x1::new(0);
let e: i64x1 = i64x1::new(1);
let r: i64x1 = transmute(vset_lane_s64::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vset_lane_u8() {
let a: u8 = 1;
let b: u8x8 = u8x8::new(0, 2, 3, 4, 5, 6, 7, 8);
let e: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: u8x8 = transmute(vset_lane_u8::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vset_lane_u16() {
let a: u16 = 1;
let b: u16x4 = u16x4::new(0, 2, 3, 4);
let e: u16x4 = u16x4::new(1, 2, 3, 4);
let r: u16x4 = transmute(vset_lane_u16::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vset_lane_u32() {
let a: u32 = 1;
let b: u32x2 = u32x2::new(0, 2);
let e: u32x2 = u32x2::new(1, 2);
let r: u32x2 = transmute(vset_lane_u32::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vset_lane_u64() {
let a: u64 = 1;
let b: u64x1 = u64x1::new(0);
let e: u64x1 = u64x1::new(1);
let r: u64x1 = transmute(vset_lane_u64::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vset_lane_p8() {
let a: p8 = 1;
let b: i8x8 = i8x8::new(0, 2, 3, 4, 5, 6, 7, 8);
let e: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: i8x8 = transmute(vset_lane_p8::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vset_lane_p16() {
let a: p16 = 1;
let b: i16x4 = i16x4::new(0, 2, 3, 4);
let e: i16x4 = i16x4::new(1, 2, 3, 4);
let r: i16x4 = transmute(vset_lane_p16::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vset_lane_p64() {
let a: p64 = 1;
let b: i64x1 = i64x1::new(0);
let e: i64x1 = i64x1::new(1);
let r: i64x1 = transmute(vset_lane_p64::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsetq_lane_s8() {
let a: i8 = 1;
let b: i8x16 = i8x16::new(0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let r: i8x16 = transmute(vsetq_lane_s8::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsetq_lane_s16() {
let a: i16 = 1;
let b: i16x8 = i16x8::new(0, 2, 3, 4, 5, 6, 7, 8);
let e: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: i16x8 = transmute(vsetq_lane_s16::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsetq_lane_s32() {
let a: i32 = 1;
let b: i32x4 = i32x4::new(0, 2, 3, 4);
let e: i32x4 = i32x4::new(1, 2, 3, 4);
let r: i32x4 = transmute(vsetq_lane_s32::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsetq_lane_s64() {
let a: i64 = 1;
let b: i64x2 = i64x2::new(0, 2);
let e: i64x2 = i64x2::new(1, 2);
let r: i64x2 = transmute(vsetq_lane_s64::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsetq_lane_u8() {
let a: u8 = 1;
let b: u8x16 = u8x16::new(0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let r: u8x16 = transmute(vsetq_lane_u8::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsetq_lane_u16() {
let a: u16 = 1;
let b: u16x8 = u16x8::new(0, 2, 3, 4, 5, 6, 7, 8);
let e: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: u16x8 = transmute(vsetq_lane_u16::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsetq_lane_u32() {
let a: u32 = 1;
let b: u32x4 = u32x4::new(0, 2, 3, 4);
let e: u32x4 = u32x4::new(1, 2, 3, 4);
let r: u32x4 = transmute(vsetq_lane_u32::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsetq_lane_u64() {
let a: u64 = 1;
let b: u64x2 = u64x2::new(0, 2);
let e: u64x2 = u64x2::new(1, 2);
let r: u64x2 = transmute(vsetq_lane_u64::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsetq_lane_p8() {
let a: p8 = 1;
let b: i8x16 = i8x16::new(0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let r: i8x16 = transmute(vsetq_lane_p8::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsetq_lane_p16() {
let a: p16 = 1;
let b: i16x8 = i16x8::new(0, 2, 3, 4, 5, 6, 7, 8);
let e: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: i16x8 = transmute(vsetq_lane_p16::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsetq_lane_p64() {
let a: p64 = 1;
let b: i64x2 = i64x2::new(0, 2);
let e: i64x2 = i64x2::new(1, 2);
let r: i64x2 = transmute(vsetq_lane_p64::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vset_lane_f32() {
let a: f32 = 1.;
let b: f32x2 = f32x2::new(0., 2.);
let e: f32x2 = f32x2::new(1., 2.);
let r: f32x2 = transmute(vset_lane_f32::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsetq_lane_f32() {
let a: f32 = 1.;
let b: f32x4 = f32x4::new(0., 2., 3., 4.);
let e: f32x4 = f32x4::new(1., 2., 3., 4.);
let r: f32x4 = transmute(vsetq_lane_f32::<0>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
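// Tests for the vshl_* intrinsics (shift left by a per-lane amount). Unlike
// the _n_ variants, the count comes from the matching lane of b and is
// always signed, even for unsigned data (note b: i8x8 for vshl_u8 below);
// a negative count shifts right instead. For small counts, one lane behaves
// like (illustrative sketch only):
//
//     let r = if n >= 0 { x << n } else { x >> -n };
//
// All tests below use n == 2, so every element is multiplied by 4.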
#[simd_test(enable = "neon")]
unsafe fn test_vshl_s8() {
let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i8x8 = i8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: i8x8 = i8x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let r: i8x8 = transmute(vshl_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshlq_s8() {
let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let e: i8x16 = i8x16::new(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64);
let r: i8x16 = transmute(vshlq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshl_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let e: i16x4 = i16x4::new(4, 8, 12, 16);
let r: i16x4 = transmute(vshl_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshlq_s16() {
let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: i16x8 = i16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let r: i16x8 = transmute(vshlq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshl_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x2 = i32x2::new(2, 2);
let e: i32x2 = i32x2::new(4, 8);
let r: i32x2 = transmute(vshl_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshlq_s32() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let e: i32x4 = i32x4::new(4, 8, 12, 16);
let r: i32x4 = transmute(vshlq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshl_s64() {
let a: i64x1 = i64x1::new(1);
let b: i64x1 = i64x1::new(2);
let e: i64x1 = i64x1::new(4);
let r: i64x1 = transmute(vshl_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshlq_s64() {
let a: i64x2 = i64x2::new(1, 2);
let b: i64x2 = i64x2::new(2, 2);
let e: i64x2 = i64x2::new(4, 8);
let r: i64x2 = transmute(vshlq_s64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshl_u8() {
let a: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i8x8 = i8x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: u8x8 = u8x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let r: u8x8 = transmute(vshl_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshlq_u8() {
let a: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let b: i8x16 = i8x16::new(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
let e: u8x16 = u8x16::new(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64);
let r: u8x16 = transmute(vshlq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshl_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(2, 2, 2, 2);
let e: u16x4 = u16x4::new(4, 8, 12, 16);
let r: u16x4 = transmute(vshl_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshlq_u16() {
let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i16x8 = i16x8::new(2, 2, 2, 2, 2, 2, 2, 2);
let e: u16x8 = u16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let r: u16x8 = transmute(vshlq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshl_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: i32x2 = i32x2::new(2, 2);
let e: u32x2 = u32x2::new(4, 8);
let r: u32x2 = transmute(vshl_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshlq_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: i32x4 = i32x4::new(2, 2, 2, 2);
let e: u32x4 = u32x4::new(4, 8, 12, 16);
let r: u32x4 = transmute(vshlq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshl_u64() {
let a: u64x1 = u64x1::new(1);
let b: i64x1 = i64x1::new(2);
let e: u64x1 = u64x1::new(4);
let r: u64x1 = transmute(vshl_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshlq_u64() {
let a: u64x2 = u64x2::new(1, 2);
let b: i64x2 = i64x2::new(2, 2);
let e: u64x2 = u64x2::new(4, 8);
let r: u64x2 = transmute(vshlq_u64(transmute(a), transmute(b)));
assert_eq!(r, e);
}
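// Tests for the vshl_n_* intrinsics (shift left by an immediate). Each lane
// is shifted left by the const generic N, i.e. multiplied by 1 << N with
// wrapping; lane-wise this is just `x << N`. With N == 2 the inputs
// 1, 2, 3, ... become 4, 8, 12, ... below.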
#[simd_test(enable = "neon")]
unsafe fn test_vshl_n_s8() {
let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: i8x8 = i8x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let r: i8x8 = transmute(vshl_n_s8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshlq_n_s8() {
let a: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: i8x16 = i8x16::new(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64);
let r: i8x16 = transmute(vshlq_n_s8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshl_n_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let e: i16x4 = i16x4::new(4, 8, 12, 16);
let r: i16x4 = transmute(vshl_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshlq_n_s16() {
let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: i16x8 = i16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let r: i16x8 = transmute(vshlq_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshl_n_s32() {
let a: i32x2 = i32x2::new(1, 2);
let e: i32x2 = i32x2::new(4, 8);
let r: i32x2 = transmute(vshl_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshlq_n_s32() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let e: i32x4 = i32x4::new(4, 8, 12, 16);
let r: i32x4 = transmute(vshlq_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshl_n_u8() {
let a: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u8x8 = u8x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let r: u8x8 = transmute(vshl_n_u8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshlq_n_u8() {
let a: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let e: u8x16 = u8x16::new(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64);
let r: u8x16 = transmute(vshlq_n_u8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshl_n_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let e: u16x4 = u16x4::new(4, 8, 12, 16);
let r: u16x4 = transmute(vshl_n_u16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshlq_n_u16() {
let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u16x8 = u16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let r: u16x8 = transmute(vshlq_n_u16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshl_n_u32() {
let a: u32x2 = u32x2::new(1, 2);
let e: u32x2 = u32x2::new(4, 8);
let r: u32x2 = transmute(vshl_n_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshlq_n_u32() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let e: u32x4 = u32x4::new(4, 8, 12, 16);
let r: u32x4 = transmute(vshlq_n_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshl_n_s64() {
let a: i64x1 = i64x1::new(1);
let e: i64x1 = i64x1::new(4);
let r: i64x1 = transmute(vshl_n_s64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshlq_n_s64() {
let a: i64x2 = i64x2::new(1, 2);
let e: i64x2 = i64x2::new(4, 8);
let r: i64x2 = transmute(vshlq_n_s64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshl_n_u64() {
let a: u64x1 = u64x1::new(1);
let e: u64x1 = u64x1::new(4);
let r: u64x1 = transmute(vshl_n_u64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshlq_n_u64() {
let a: u64x2 = u64x2::new(1, 2);
let e: u64x2 = u64x2::new(4, 8);
let r: u64x2 = transmute(vshlq_n_u64::<2>(transmute(a)));
assert_eq!(r, e);
}
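// Tests for the vshll_n_* intrinsics (shift left long). Each lane is first
// widened to twice its width and then shifted, so no bits are lost. A
// scalar sketch of one i8 -> i16 lane (illustrative):
//
//     let widened = (x as i16) << N;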
#[simd_test(enable = "neon")]
unsafe fn test_vshll_n_s8() {
let a: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: i16x8 = i16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let r: i16x8 = transmute(vshll_n_s8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshll_n_s16() {
let a: i16x4 = i16x4::new(1, 2, 3, 4);
let e: i32x4 = i32x4::new(4, 8, 12, 16);
let r: i32x4 = transmute(vshll_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshll_n_s32() {
let a: i32x2 = i32x2::new(1, 2);
let e: i64x2 = i64x2::new(4, 8);
let r: i64x2 = transmute(vshll_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshll_n_u8() {
let a: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let e: u16x8 = u16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let r: u16x8 = transmute(vshll_n_u8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshll_n_u16() {
let a: u16x4 = u16x4::new(1, 2, 3, 4);
let e: u32x4 = u32x4::new(4, 8, 12, 16);
let r: u32x4 = transmute(vshll_n_u16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshll_n_u32() {
let a: u32x2 = u32x2::new(1, 2);
let e: u64x2 = u64x2::new(4, 8);
let r: u64x2 = transmute(vshll_n_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
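// Tests for the vshr_n_* intrinsics (shift right by an immediate, without
// rounding). Signed lanes use an arithmetic shift and unsigned lanes a
// logical one; bits shifted out are discarded, so lane-wise the result is
// plain `x >> N`, in contrast to the rounding vrshr_n_* tests above.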
#[simd_test(enable = "neon")]
unsafe fn test_vshr_n_s8() {
let a: i8x8 = i8x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: i8x8 = transmute(vshr_n_s8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshrq_n_s8() {
let a: i8x16 = i8x16::new(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64);
let e: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let r: i8x16 = transmute(vshrq_n_s8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshr_n_s16() {
let a: i16x4 = i16x4::new(4, 8, 12, 16);
let e: i16x4 = i16x4::new(1, 2, 3, 4);
let r: i16x4 = transmute(vshr_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshrq_n_s16() {
let a: i16x8 = i16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: i16x8 = transmute(vshrq_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshr_n_s32() {
let a: i32x2 = i32x2::new(4, 8);
let e: i32x2 = i32x2::new(1, 2);
let r: i32x2 = transmute(vshr_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshrq_n_s32() {
let a: i32x4 = i32x4::new(4, 8, 12, 16);
let e: i32x4 = i32x4::new(1, 2, 3, 4);
let r: i32x4 = transmute(vshrq_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshr_n_s64() {
let a: i64x1 = i64x1::new(4);
let e: i64x1 = i64x1::new(1);
let r: i64x1 = transmute(vshr_n_s64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshrq_n_s64() {
let a: i64x2 = i64x2::new(4, 8);
let e: i64x2 = i64x2::new(1, 2);
let r: i64x2 = transmute(vshrq_n_s64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshr_n_u8() {
let a: u8x8 = u8x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: u8x8 = transmute(vshr_n_u8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshrq_n_u8() {
let a: u8x16 = u8x16::new(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64);
let e: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
let r: u8x16 = transmute(vshrq_n_u8::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshr_n_u16() {
let a: u16x4 = u16x4::new(4, 8, 12, 16);
let e: u16x4 = u16x4::new(1, 2, 3, 4);
let r: u16x4 = transmute(vshr_n_u16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshrq_n_u16() {
let a: u16x8 = u16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: u16x8 = transmute(vshrq_n_u16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshr_n_u32() {
let a: u32x2 = u32x2::new(4, 8);
let e: u32x2 = u32x2::new(1, 2);
let r: u32x2 = transmute(vshr_n_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshrq_n_u32() {
let a: u32x4 = u32x4::new(4, 8, 12, 16);
let e: u32x4 = u32x4::new(1, 2, 3, 4);
let r: u32x4 = transmute(vshrq_n_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshr_n_u64() {
let a: u64x1 = u64x1::new(4);
let e: u64x1 = u64x1::new(1);
let r: u64x1 = transmute(vshr_n_u64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshrq_n_u64() {
let a: u64x2 = u64x2::new(4, 8);
let e: u64x2 = u64x2::new(1, 2);
let r: u64x2 = transmute(vshrq_n_u64::<2>(transmute(a)));
assert_eq!(r, e);
}
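// Tests for the vshrn_n_* intrinsics (shift right narrow). As vshr_n_*, but
// the shifted lane is then truncated to the half-width type; one i16 lane
// of vshrn_n_s16 behaves like `(x >> N) as i8` (a truncating, not
// saturating, cast) in this illustrative model.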
#[simd_test(enable = "neon")]
unsafe fn test_vshrn_n_s16() {
let a: i16x8 = i16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: i8x8 = transmute(vshrn_n_s16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshrn_n_s32() {
let a: i32x4 = i32x4::new(4, 8, 12, 16);
let e: i16x4 = i16x4::new(1, 2, 3, 4);
let r: i16x4 = transmute(vshrn_n_s32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshrn_n_s64() {
let a: i64x2 = i64x2::new(4, 8);
let e: i32x2 = i32x2::new(1, 2);
let r: i32x2 = transmute(vshrn_n_s64::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshrn_n_u16() {
let a: u16x8 = u16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let r: u8x8 = transmute(vshrn_n_u16::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshrn_n_u32() {
let a: u32x4 = u32x4::new(4, 8, 12, 16);
let e: u16x4 = u16x4::new(1, 2, 3, 4);
let r: u16x4 = transmute(vshrn_n_u32::<2>(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vshrn_n_u64() {
let a: u64x2 = u64x2::new(4, 8);
let e: u32x2 = u32x2::new(1, 2);
let r: u32x2 = transmute(vshrn_n_u64::<2>(transmute(a)));
assert_eq!(r, e);
}
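// Tests for the vsra_n_* intrinsics (shift right and accumulate). The
// truncating counterpart of the vrsra_n_* tests above: lane-wise
// a.wrapping_add(b >> N). With a == 1, b == 4, N == 2 the result is
// 1 + 1 == 2, as the vectors below expect.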
#[simd_test(enable = "neon")]
unsafe fn test_vsra_n_s8() {
let a: i8x8 = i8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: i8x8 = i8x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: i8x8 = i8x8::new(2, 3, 4, 5, 6, 7, 8, 9);
let r: i8x8 = transmute(vsra_n_s8::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsraq_n_s8() {
let a: i8x16 = i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
let b: i8x16 = i8x16::new(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64);
let e: i8x16 = i8x16::new(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17);
let r: i8x16 = transmute(vsraq_n_s8::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsra_n_s16() {
let a: i16x4 = i16x4::new(1, 1, 1, 1);
let b: i16x4 = i16x4::new(4, 8, 12, 16);
let e: i16x4 = i16x4::new(2, 3, 4, 5);
let r: i16x4 = transmute(vsra_n_s16::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsraq_n_s16() {
let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: i16x8 = i16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: i16x8 = i16x8::new(2, 3, 4, 5, 6, 7, 8, 9);
let r: i16x8 = transmute(vsraq_n_s16::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsra_n_s32() {
let a: i32x2 = i32x2::new(1, 1);
let b: i32x2 = i32x2::new(4, 8);
let e: i32x2 = i32x2::new(2, 3);
let r: i32x2 = transmute(vsra_n_s32::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsraq_n_s32() {
let a: i32x4 = i32x4::new(1, 1, 1, 1);
let b: i32x4 = i32x4::new(4, 8, 12, 16);
let e: i32x4 = i32x4::new(2, 3, 4, 5);
let r: i32x4 = transmute(vsraq_n_s32::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsra_n_s64() {
let a: i64x1 = i64x1::new(1);
let b: i64x1 = i64x1::new(4);
let e: i64x1 = i64x1::new(2);
let r: i64x1 = transmute(vsra_n_s64::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsraq_n_s64() {
let a: i64x2 = i64x2::new(1, 1);
let b: i64x2 = i64x2::new(4, 8);
let e: i64x2 = i64x2::new(2, 3);
let r: i64x2 = transmute(vsraq_n_s64::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsra_n_u8() {
let a: u8x8 = u8x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: u8x8 = u8x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: u8x8 = u8x8::new(2, 3, 4, 5, 6, 7, 8, 9);
let r: u8x8 = transmute(vsra_n_u8::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsraq_n_u8() {
let a: u8x16 = u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
let b: u8x16 = u8x16::new(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64);
let e: u8x16 = u8x16::new(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17);
let r: u8x16 = transmute(vsraq_n_u8::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsra_n_u16() {
let a: u16x4 = u16x4::new(1, 1, 1, 1);
let b: u16x4 = u16x4::new(4, 8, 12, 16);
let e: u16x4 = u16x4::new(2, 3, 4, 5);
let r: u16x4 = transmute(vsra_n_u16::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsraq_n_u16() {
let a: u16x8 = u16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: u16x8 = u16x8::new(4, 8, 12, 16, 20, 24, 28, 32);
let e: u16x8 = u16x8::new(2, 3, 4, 5, 6, 7, 8, 9);
let r: u16x8 = transmute(vsraq_n_u16::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsra_n_u32() {
let a: u32x2 = u32x2::new(1, 1);
let b: u32x2 = u32x2::new(4, 8);
let e: u32x2 = u32x2::new(2, 3);
let r: u32x2 = transmute(vsra_n_u32::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsraq_n_u32() {
let a: u32x4 = u32x4::new(1, 1, 1, 1);
let b: u32x4 = u32x4::new(4, 8, 12, 16);
let e: u32x4 = u32x4::new(2, 3, 4, 5);
let r: u32x4 = transmute(vsraq_n_u32::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsra_n_u64() {
let a: u64x1 = u64x1::new(1);
let b: u64x1 = u64x1::new(4);
let e: u64x1 = u64x1::new(2);
let r: u64x1 = transmute(vsra_n_u64::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vsraq_n_u64() {
let a: u64x2 = u64x2::new(1, 1);
let b: u64x2 = u64x2::new(4, 8);
let e: u64x2 = u64x2::new(2, 3);
let r: u64x2 = transmute(vsraq_n_u64::<2>(transmute(a), transmute(b)));
assert_eq!(r, e);
}
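// Tests for the vtrn_* intrinsics (transpose). Lanes are exchanged between
// the inputs as if transposing 2x2 blocks: the first result holds
// (a0, b0, a2, b2, ...), the second (a1, b1, a3, b3, ...). The expected
// [T; 2N] arrays below are the two result vectors laid out back to back
// via transmute.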
#[simd_test(enable = "neon")]
unsafe fn test_vtrn_s8() {
let a: i8x8 = i8x8::new(0, 2, 2, 6, 2, 10, 6, 14);
let b: i8x8 = i8x8::new(1, 3, 3, 7, 3, 1, 7, 15);
let e: [i8; 16] = [0, 1, 2, 3, 2, 3, 6, 7, 2, 3, 6, 7, 10, 1, 14, 15];
let r: [i8; 16] = transmute(vtrn_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrn_s16() {
let a: i16x4 = i16x4::new(0, 2, 2, 6);
let b: i16x4 = i16x4::new(1, 3, 3, 7);
let e: [i16; 8] = [0, 1, 2, 3, 2, 3, 6, 7];
let r: [i16; 8] = transmute(vtrn_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrnq_s8() {
let a: i8x16 = i8x16::new(0, 2, 2, 6, 2, 10, 6, 14, 2, 18, 6, 22, 10, 26, 14, 30);
let b: i8x16 = i8x16::new(1, 3, 3, 7, 3, 1, 7, 15, 3, 19, 7, 23, 1, 27, 15, 31);
let e: [i8; 32] = [0, 1, 2, 3, 2, 3, 6, 7, 2, 3, 6, 7, 10, 1, 14, 15, 2, 3, 6, 7, 10, 1, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31];
let r: [i8; 32] = transmute(vtrnq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrnq_s16() {
let a: i16x8 = i16x8::new(0, 2, 2, 6, 2, 10, 6, 14);
let b: i16x8 = i16x8::new(1, 3, 3, 7, 3, 1, 7, 15);
let e: [i16; 16] = [0, 1, 2, 3, 2, 3, 6, 7, 2, 3, 6, 7, 10, 1, 14, 15];
let r: [i16; 16] = transmute(vtrnq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrnq_s32() {
let a: i32x4 = i32x4::new(0, 2, 2, 6);
let b: i32x4 = i32x4::new(1, 3, 3, 7);
let e: [i32; 8] = [0, 1, 2, 3, 2, 3, 6, 7];
let r: [i32; 8] = transmute(vtrnq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrn_u8() {
let a: u8x8 = u8x8::new(0, 2, 2, 6, 2, 10, 6, 14);
let b: u8x8 = u8x8::new(1, 3, 3, 7, 3, 1, 7, 15);
let e: [u8; 16] = [0, 1, 2, 3, 2, 3, 6, 7, 2, 3, 6, 7, 10, 1, 14, 15];
let r: [u8; 16] = transmute(vtrn_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrn_u16() {
let a: u16x4 = u16x4::new(0, 2, 2, 6);
let b: u16x4 = u16x4::new(1, 3, 3, 7);
let e: [u16; 8] = [0, 1, 2, 3, 2, 3, 6, 7];
let r: [u16; 8] = transmute(vtrn_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrnq_u8() {
let a: u8x16 = u8x16::new(0, 2, 2, 6, 2, 10, 6, 14, 2, 18, 6, 22, 10, 26, 14, 30);
let b: u8x16 = u8x16::new(1, 3, 3, 7, 3, 1, 7, 15, 3, 19, 7, 23, 1, 27, 15, 31);
let e: [u8; 32] = [0, 1, 2, 3, 2, 3, 6, 7, 2, 3, 6, 7, 10, 1, 14, 15, 2, 3, 6, 7, 10, 1, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31];
let r: [u8; 32] = transmute(vtrnq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrnq_u16() {
let a: u16x8 = u16x8::new(0, 2, 2, 6, 2, 10, 6, 14);
let b: u16x8 = u16x8::new(1, 3, 3, 7, 3, 1, 7, 15);
let e: [u16; 16] = [0, 1, 2, 3, 2, 3, 6, 7, 2, 3, 6, 7, 10, 1, 14, 15];
let r: [u16; 16] = transmute(vtrnq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrnq_u32() {
let a: u32x4 = u32x4::new(0, 2, 2, 6);
let b: u32x4 = u32x4::new(1, 3, 3, 7);
let e: [u32; 8] = [0, 1, 2, 3, 2, 3, 6, 7];
let r: [u32; 8] = transmute(vtrnq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrn_p8() {
let a: i8x8 = i8x8::new(0, 2, 2, 6, 2, 10, 6, 14);
let b: i8x8 = i8x8::new(1, 3, 3, 7, 3, 1, 7, 15);
let e: [u8; 16] = [0, 1, 2, 3, 2, 3, 6, 7, 2, 3, 6, 7, 10, 1, 14, 15];
let r: [u8; 16] = transmute(vtrn_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrn_p16() {
let a: i16x4 = i16x4::new(0, 2, 2, 6);
let b: i16x4 = i16x4::new(1, 3, 3, 7);
let e: [u16; 8] = [0, 1, 2, 3, 2, 3, 6, 7];
let r: [u16; 8] = transmute(vtrn_p16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrnq_p8() {
let a: i8x16 = i8x16::new(0, 2, 2, 6, 2, 10, 6, 14, 2, 18, 6, 22, 10, 26, 14, 30);
let b: i8x16 = i8x16::new(1, 3, 3, 7, 3, 1, 7, 15, 3, 19, 7, 23, 1, 27, 15, 31);
let e: [u8; 32] = [0, 1, 2, 3, 2, 3, 6, 7, 2, 3, 6, 7, 10, 1, 14, 15, 2, 3, 6, 7, 10, 1, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31];
let r: [u8; 32] = transmute(vtrnq_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrnq_p16() {
let a: i16x8 = i16x8::new(0, 2, 2, 6, 2, 10, 6, 14);
let b: i16x8 = i16x8::new(1, 3, 3, 7, 3, 1, 7, 15);
let e: [u16; 16] = [0, 1, 2, 3, 2, 3, 6, 7, 2, 3, 6, 7, 10, 1, 14, 15];
let r: [u16; 16] = transmute(vtrnq_p16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrn_s32() {
let a: i32x2 = i32x2::new(0, 2);
let b: i32x2 = i32x2::new(1, 3);
let e: [i32; 4] = [0, 1, 2, 3];
let r: [i32; 4] = transmute(vtrn_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrn_u32() {
let a: u32x2 = u32x2::new(0, 2);
let b: u32x2 = u32x2::new(1, 3);
let e: [u32; 4] = [0, 1, 2, 3];
let r: [u32; 4] = transmute(vtrn_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrn_f32() {
let a: f32x2 = f32x2::new(0., 2.);
let b: f32x2 = f32x2::new(1., 3.);
let e: [f32; 4] = [0., 1., 2., 3.];
let r: [f32; 4] = transmute(vtrn_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vtrnq_f32() {
let a: f32x4 = f32x4::new(0., 2., 2., 6.);
let b: f32x4 = f32x4::new(1., 3., 3., 7.);
let e: [f32; 8] = [0., 1., 2., 3., 2., 3., 6., 7.];
let r: [f32; 8] = transmute(vtrnq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
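// Tests for the vzip_* intrinsics (interleave). Taken together, the two
// results alternate the lanes of a and b: (a0, b0, a1, b1, ...). With a
// holding even values and b odd ones, the concatenated expectation below
// counts straight through 0, 1, 2, 3, ...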
#[simd_test(enable = "neon")]
unsafe fn test_vzip_s8() {
let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
let b: i8x8 = i8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
let e: [i8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
let r: [i8; 16] = transmute(vzip_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzip_s16() {
let a: i16x4 = i16x4::new(0, 2, 4, 6);
let b: i16x4 = i16x4::new(1, 3, 5, 7);
let e: [i16; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
let r: [i16; 8] = transmute(vzip_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzip_u8() {
let a: u8x8 = u8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
let b: u8x8 = u8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
let e: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
let r: [u8; 16] = transmute(vzip_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzip_u16() {
let a: u16x4 = u16x4::new(0, 2, 4, 6);
let b: u16x4 = u16x4::new(1, 3, 5, 7);
let e: [u16; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
let r: [u16; 8] = transmute(vzip_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzip_p8() {
let a: i8x8 = i8x8::new(0, 2, 4, 6, 8, 10, 12, 14);
let b: i8x8 = i8x8::new(1, 3, 5, 7, 9, 11, 13, 15);
let e: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
let r: [u8; 16] = transmute(vzip_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzip_p16() {
let a: i16x4 = i16x4::new(0, 2, 4, 6);
let b: i16x4 = i16x4::new(1, 3, 5, 7);
let e: [u16; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
let r: [u16; 8] = transmute(vzip_p16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzip_s32() {
let a: i32x2 = i32x2::new(0, 2);
let b: i32x2 = i32x2::new(1, 3);
let e: [i32; 4] = [0, 1, 2, 3];
let r: [i32; 4] = transmute(vzip_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzip_u32() {
let a: u32x2 = u32x2::new(0, 2);
let b: u32x2 = u32x2::new(1, 3);
let e: [u32; 4] = [0, 1, 2, 3];
let r: [u32; 4] = transmute(vzip_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzipq_s8() {
let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
let b: i8x16 = i8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
let e: [i8; 32] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31];
let r: [i8; 32] = transmute(vzipq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzipq_s16() {
let a: i16x8 = i16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
let b: i16x8 = i16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
let e: [i16; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
let r: [i16; 16] = transmute(vzipq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzipq_s32() {
let a: i32x4 = i32x4::new(0, 2, 4, 6);
let b: i32x4 = i32x4::new(1, 3, 5, 7);
let e: [i32; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
let r: [i32; 8] = transmute(vzipq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzipq_u8() {
let a: u8x16 = u8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
let b: u8x16 = u8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
let e: [u8; 32] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31];
let r: [u8; 32] = transmute(vzipq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzipq_u16() {
let a: u16x8 = u16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
let b: u16x8 = u16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
let e: [u16; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
let r: [u16; 16] = transmute(vzipq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzipq_u32() {
let a: u32x4 = u32x4::new(0, 2, 4, 6);
let b: u32x4 = u32x4::new(1, 3, 5, 7);
let e: [u32; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
let r: [u32; 8] = transmute(vzipq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzipq_p8() {
let a: i8x16 = i8x16::new(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
let b: i8x16 = i8x16::new(1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
let e: [u8; 32] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31];
let r: [u8; 32] = transmute(vzipq_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzipq_p16() {
let a: i16x8 = i16x8::new(0, 2, 4, 6, 8, 10, 12, 14);
let b: i16x8 = i16x8::new(1, 3, 5, 7, 9, 11, 13, 15);
let e: [u16; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
let r: [u16; 16] = transmute(vzipq_p16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzip_f32() {
let a: f32x2 = f32x2::new(1., 2.);
let b: f32x2 = f32x2::new(5., 6.);
let e: [f32; 4] = [1., 5., 2., 6.];
let r: [f32; 4] = transmute(vzip_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vzipq_f32() {
let a: f32x4 = f32x4::new(1., 2., 3., 4.);
let b: f32x4 = f32x4::new(5., 6., 7., 8.);
let e: [f32; 8] = [1., 5., 2., 6., 3., 7., 4., 8.];
let r: [f32; 8] = transmute(vzipq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
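// Tests for the vuzp_* intrinsics (de-interleave), the inverse of vzip_*:
// the first result gathers the even-indexed lanes of a then b, the second
// the odd-indexed lanes, giving (a0, a2, ..., b0, b2, ...) and
// (a1, a3, ..., b1, b3, ...).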
#[simd_test(enable = "neon")]
unsafe fn test_vuzp_s8() {
let a: i8x8 = i8x8::new(1, 2, 2, 3, 2, 3, 3, 8);
let b: i8x8 = i8x8::new(2, 3, 3, 8, 3, 15, 8, 16);
let e: [i8; 16] = [1, 2, 2, 3, 2, 3, 3, 8, 2, 3, 3, 8, 3, 8, 15, 16];
let r: [i8; 16] = transmute(vuzp_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzp_s16() {
let a: i16x4 = i16x4::new(1, 2, 2, 3);
let b: i16x4 = i16x4::new(2, 3, 3, 8);
let e: [i16; 8] = [1, 2, 2, 3, 2, 3, 3, 8];
let r: [i16; 8] = transmute(vuzp_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzpq_s8() {
let a: i8x16 = i8x16::new(1, 2, 2, 3, 2, 3, 3, 8, 2, 3, 3, 8, 3, 15, 8, 16);
let b: i8x16 = i8x16::new(2, 3, 3, 8, 3, 15, 8, 16, 3, 29, 8, 30, 15, 31, 16, 32);
let e: [i8; 32] = [1, 2, 2, 3, 2, 3, 3, 8, 2, 3, 3, 8, 3, 8, 15, 16, 2, 3, 3, 8, 3, 8, 15, 16, 3, 8, 15, 16, 29, 30, 31, 32];
let r: [i8; 32] = transmute(vuzpq_s8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzpq_s16() {
let a: i16x8 = i16x8::new(1, 2, 2, 3, 2, 3, 3, 8);
let b: i16x8 = i16x8::new(2, 3, 3, 8, 3, 15, 8, 16);
let e: [i16; 16] = [1, 2, 2, 3, 2, 3, 3, 8, 2, 3, 3, 8, 3, 8, 15, 16];
let r: [i16; 16] = transmute(vuzpq_s16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzpq_s32() {
let a: i32x4 = i32x4::new(1, 2, 2, 3);
let b: i32x4 = i32x4::new(2, 3, 3, 8);
let e: [i32; 8] = [1, 2, 2, 3, 2, 3, 3, 8];
let r: [i32; 8] = transmute(vuzpq_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzp_u8() {
let a: u8x8 = u8x8::new(1, 2, 2, 3, 2, 3, 3, 8);
let b: u8x8 = u8x8::new(2, 3, 3, 8, 3, 15, 8, 16);
let e: [u8; 16] = [1, 2, 2, 3, 2, 3, 3, 8, 2, 3, 3, 8, 3, 8, 15, 16];
let r: [u8; 16] = transmute(vuzp_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzp_u16() {
let a: u16x4 = u16x4::new(1, 2, 2, 3);
let b: u16x4 = u16x4::new(2, 3, 3, 8);
let e: [u16; 8] = [1, 2, 2, 3, 2, 3, 3, 8];
let r: [u16; 8] = transmute(vuzp_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzpq_u8() {
let a: u8x16 = u8x16::new(1, 2, 2, 3, 2, 3, 3, 8, 2, 3, 3, 8, 3, 15, 8, 16);
let b: u8x16 = u8x16::new(2, 3, 3, 8, 3, 15, 8, 16, 3, 29, 8, 30, 15, 31, 16, 32);
let e: [u8; 32] = [1, 2, 2, 3, 2, 3, 3, 8, 2, 3, 3, 8, 3, 8, 15, 16, 2, 3, 3, 8, 3, 8, 15, 16, 3, 8, 15, 16, 29, 30, 31, 32];
let r: [u8; 32] = transmute(vuzpq_u8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzpq_u16() {
let a: u16x8 = u16x8::new(1, 2, 2, 3, 2, 3, 3, 8);
let b: u16x8 = u16x8::new(2, 3, 3, 8, 3, 15, 8, 16);
let e: [u16; 16] = [1, 2, 2, 3, 2, 3, 3, 8, 2, 3, 3, 8, 3, 8, 15, 16];
let r: [u16; 16] = transmute(vuzpq_u16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzpq_u32() {
let a: u32x4 = u32x4::new(1, 2, 2, 3);
let b: u32x4 = u32x4::new(2, 3, 3, 8);
let e: [u32; 8] = [1, 2, 2, 3, 2, 3, 3, 8];
let r: [u32; 8] = transmute(vuzpq_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzp_p8() {
let a: i8x8 = i8x8::new(1, 2, 2, 3, 2, 3, 3, 8);
let b: i8x8 = i8x8::new(2, 3, 3, 8, 3, 15, 8, 16);
let e: [u8; 16] = [1, 2, 2, 3, 2, 3, 3, 8, 2, 3, 3, 8, 3, 8, 15, 16];
let r: [u8; 16] = transmute(vuzp_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzp_p16() {
let a: i16x4 = i16x4::new(1, 2, 2, 3);
let b: i16x4 = i16x4::new(2, 3, 3, 8);
let e: [u16; 8] = [1, 2, 2, 3, 2, 3, 3, 8];
let r: [u16; 8] = transmute(vuzp_p16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzpq_p8() {
let a: i8x16 = i8x16::new(1, 2, 2, 3, 2, 3, 3, 8, 2, 3, 3, 8, 3, 15, 8, 16);
let b: i8x16 = i8x16::new(2, 3, 3, 8, 3, 15, 8, 16, 3, 29, 8, 30, 15, 31, 16, 32);
let e: [u8; 32] = [1, 2, 2, 3, 2, 3, 3, 8, 2, 3, 3, 8, 3, 8, 15, 16, 2, 3, 3, 8, 3, 8, 15, 16, 3, 8, 15, 16, 29, 30, 31, 32];
let r: [u8; 32] = transmute(vuzpq_p8(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzpq_p16() {
let a: i16x8 = i16x8::new(1, 2, 2, 3, 2, 3, 3, 8);
let b: i16x8 = i16x8::new(2, 3, 3, 8, 3, 15, 8, 16);
let e: [u16; 16] = [1, 2, 2, 3, 2, 3, 3, 8, 2, 3, 3, 8, 3, 8, 15, 16];
let r: [u16; 16] = transmute(vuzpq_p16(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzp_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i32x2 = i32x2::new(2, 3);
let e: [i32; 4] = [1, 2, 2, 3];
let r: [i32; 4] = transmute(vuzp_s32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzp_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u32x2 = u32x2::new(2, 3);
let e: [u32; 4] = [1, 2, 2, 3];
let r: [u32; 4] = transmute(vuzp_u32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzp_f32() {
let a: f32x2 = f32x2::new(1., 2.);
let b: f32x2 = f32x2::new(2., 6.);
let e: [f32; 4] = [1., 2., 2., 6.];
let r: [f32; 4] = transmute(vuzp_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vuzpq_f32() {
let a: f32x4 = f32x4::new(1., 2., 2., 4.);
let b: f32x4 = f32x4::new(2., 6., 6., 8.);
let e: [f32; 8] = [1., 2., 2., 6., 2., 4., 6., 8.];
let r: [f32; 8] = transmute(vuzpq_f32(transmute(a), transmute(b)));
assert_eq!(r, e);
}
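// Tests for the vabal_* intrinsics (absolute difference and accumulate,
// long). Per lane, |b - c| is computed, widened, and added to the wide
// accumulator a. A scalar model of one u8 lane (illustrative):
//
//     let r = a.wrapping_add(b.abs_diff(c) as u16); // a: u16, b, c: u8
//
// With a == i, b == i, c == 10, every lane is i + (10 - i) == 10 below.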
#[simd_test(enable = "neon")]
unsafe fn test_vabal_u8() {
let a: u16x8 = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let c: u8x8 = u8x8::new(10, 10, 10, 10, 10, 10, 10, 10);
let e: u16x8 = u16x8::new(10, 10, 10, 10, 10, 10, 10, 10);
let r: u16x8 = transmute(vabal_u8(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabal_u16() {
let a: u32x4 = u32x4::new(1, 2, 3, 4);
let b: u16x4 = u16x4::new(1, 2, 3, 4);
let c: u16x4 = u16x4::new(10, 10, 10, 10);
let e: u32x4 = u32x4::new(10, 10, 10, 10);
let r: u32x4 = transmute(vabal_u16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabal_u32() {
let a: u64x2 = u64x2::new(1, 2);
let b: u32x2 = u32x2::new(1, 2);
let c: u32x2 = u32x2::new(10, 10);
let e: u64x2 = u64x2::new(10, 10);
let r: u64x2 = transmute(vabal_u32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabal_s8() {
let a: i16x8 = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
let c: i8x8 = i8x8::new(10, 10, 10, 10, 10, 10, 10, 10);
let e: i16x8 = i16x8::new(10, 10, 10, 10, 10, 10, 10, 10);
let r: i16x8 = transmute(vabal_s8(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabal_s16() {
let a: i32x4 = i32x4::new(1, 2, 3, 4);
let b: i16x4 = i16x4::new(1, 2, 3, 4);
let c: i16x4 = i16x4::new(10, 10, 10, 10);
let e: i32x4 = i32x4::new(10, 10, 10, 10);
let r: i32x4 = transmute(vabal_s16(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vabal_s32() {
let a: i64x2 = i64x2::new(1, 2);
let b: i32x2 = i32x2::new(1, 2);
let c: i32x2 = i32x2::new(10, 10);
let e: i64x2 = i64x2::new(10, 10);
let r: i64x2 = transmute(vabal_s32(transmute(a), transmute(b), transmute(c)));
assert_eq!(r, e);
}
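// Tests for the vqabs_* intrinsics (saturating absolute value). Like a
// plain absolute value except that the minimum saturates to the maximum
// instead of overflowing: qabs(-128_i8) == 127, which is exactly what the
// first lane of each test below checks.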
#[simd_test(enable = "neon")]
unsafe fn test_vqabs_s8() {
let a: i8x8 = i8x8::new(-128, 0x7F, -6, -5, -4, -3, -2, -1);
let e: i8x8 = i8x8::new(0x7F, 0x7F, 6, 5, 4, 3, 2, 1);
let r: i8x8 = transmute(vqabs_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqabsq_s8() {
let a: i8x16 = i8x16::new(-128, 0x7F, -6, -5, -4, -3, -2, -1, 0, -127, 127, 1, 2, 3, 4, 5);
let e: i8x16 = i8x16::new(0x7F, 0x7F, 6, 5, 4, 3, 2, 1, 0, 127, 127, 1, 2, 3, 4, 5);
let r: i8x16 = transmute(vqabsq_s8(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqabs_s16() {
let a: i16x4 = i16x4::new(-32768, 0x7F_FF, -6, -5);
let e: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 6, 5);
let r: i16x4 = transmute(vqabs_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqabsq_s16() {
let a: i16x8 = i16x8::new(-32768, 0x7F_FF, -6, -5, -4, -3, -2, -1);
let e: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 6, 5, 4, 3, 2, 1);
let r: i16x8 = transmute(vqabsq_s16(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqabs_s32() {
let a: i32x2 = i32x2::new(-2147483648, 0x7F_FF_FF_FF);
let e: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
let r: i32x2 = transmute(vqabs_s32(transmute(a)));
assert_eq!(r, e);
}
#[simd_test(enable = "neon")]
unsafe fn test_vqabsq_s32() {
let a: i32x4 = i32x4::new(-2147483648, 0x7F_FF_FF_FF, -6, -5);
let e: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 6, 5);
let r: i32x4 = transmute(vqabsq_s32(transmute(a)));
assert_eq!(r, e);
}
}