diff --git a/crates/core_arch/README.md b/crates/core_arch/README.md index a5f490bcf5..fc18a5759d 100644 --- a/crates/core_arch/README.md +++ b/crates/core_arch/README.md @@ -7,8 +7,6 @@ The `core::arch` module implements architecture-dependent intrinsics (e.g. SIMD) `core::arch` is available as part of `libcore` and it is re-exported by `libstd`. Prefer using it via `core::arch` or `std::arch` than via this crate. -Unstable features are often available in nightly Rust via the -`feature(stdsimd)`. Using `core::arch` via this crate requires nightly Rust, and it can (and does) break often. The only cases in which you should consider using it via this crate diff --git a/crates/core_arch/src/aarch64/crc.rs b/crates/core_arch/src/aarch64/crc.rs index ac3f8d815c..35940e0db8 100644 --- a/crates/core_arch/src/aarch64/crc.rs +++ b/crates/core_arch/src/aarch64/crc.rs @@ -15,6 +15,7 @@ use stdarch_test::assert_instr; #[inline] #[target_feature(enable = "crc")] #[cfg_attr(test, assert_instr(crc32x))] +#[unstable(feature = "stdarch_arm_crc32", issue = "117215")] pub unsafe fn __crc32d(crc: u32, data: u64) -> u32 { crc32x_(crc, data) } @@ -25,6 +26,7 @@ pub unsafe fn __crc32d(crc: u32, data: u64) -> u32 { #[inline] #[target_feature(enable = "crc")] #[cfg_attr(test, assert_instr(crc32cx))] +#[unstable(feature = "stdarch_arm_crc32", issue = "117215")] pub unsafe fn __crc32cd(crc: u32, data: u64) -> u32 { crc32cx_(crc, data) } diff --git a/crates/core_arch/src/aarch64/mod.rs b/crates/core_arch/src/aarch64/mod.rs index 35095cb96a..fefd2f4780 100644 --- a/crates/core_arch/src/aarch64/mod.rs +++ b/crates/core_arch/src/aarch64/mod.rs @@ -10,17 +10,22 @@ #[cfg(target_endian = "little")] mod neon; #[cfg(target_endian = "little")] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub use self::neon::*; mod tme; +#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub use self::tme::*; mod crc; +#[unstable(feature = "stdarch_arm_crc32", issue = "117215")] pub use 
self::crc::*; mod prefetch; +#[unstable(feature = "stdarch_aarch64_prefetch", issue = "117217")] pub use self::prefetch::*; +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub use super::arm_shared::*; #[cfg(test)] diff --git a/crates/core_arch/src/aarch64/neon/generated.rs b/crates/core_arch/src/aarch64/neon/generated.rs index 20dec6d80d..0b2d412617 100644 --- a/crates/core_arch/src/aarch64/neon/generated.rs +++ b/crates/core_arch/src/aarch64/neon/generated.rs @@ -15,6 +15,7 @@ use stdarch_test::assert_instr; #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(eor3))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -30,6 +31,7 @@ pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(eor3))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -45,6 +47,7 @@ pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(eor3))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -60,6 +63,7 @@ pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(eor3))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -75,6 +79,7 @@ pub unsafe fn veor3q_s64(a: int64x2_t, b: 
int64x2_t, c: int64x2_t) -> int64x2_t #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(eor3))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -90,6 +95,7 @@ pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16 #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(eor3))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -105,6 +111,7 @@ pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(eor3))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -120,6 +127,7 @@ pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(eor3))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8360,6 +8368,7 @@ pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { #[target_feature(enable = "neon,i8mm")] #[cfg_attr(test, assert_instr(usdot, LANE = 3))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] pub unsafe fn vusdot_laneq_s32(a: int32x2_t, b: uint8x8_t, c: int8x16_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 2); let c: int32x4_t = transmute(c); @@ -8374,6 +8383,7 @@ pub unsafe fn vusdot_laneq_s32(a: int32x2_t, b: uint8x8_t, c: i 
#[target_feature(enable = "neon,i8mm")] #[cfg_attr(test, assert_instr(usdot, LANE = 3))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] pub unsafe fn vusdotq_laneq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); let c: int32x4_t = transmute(c); @@ -8388,6 +8398,7 @@ pub unsafe fn vusdotq_laneq_s32(a: int32x4_t, b: uint8x16_t, c: #[target_feature(enable = "neon,i8mm")] #[cfg_attr(test, assert_instr(sudot, LANE = 3))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] pub unsafe fn vsudot_laneq_s32(a: int32x2_t, b: int8x8_t, c: uint8x16_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 2); let c: uint32x4_t = transmute(c); @@ -8402,6 +8413,7 @@ pub unsafe fn vsudot_laneq_s32(a: int32x2_t, b: int8x8_t, c: ui #[target_feature(enable = "neon,i8mm")] #[cfg_attr(test, assert_instr(sudot, LANE = 3))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] pub unsafe fn vsudotq_laneq_s32(a: int32x4_t, b: int8x16_t, c: uint8x16_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); let c: uint32x4_t = transmute(c); @@ -9965,6 +9977,7 @@ pub unsafe fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(bcax))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9980,6 +9993,7 @@ pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(bcax))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9995,6 +10009,7 @@ pub unsafe fn 
vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(bcax))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10010,6 +10025,7 @@ pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(bcax))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10025,6 +10041,7 @@ pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(bcax))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10040,6 +10057,7 @@ pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16 #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(bcax))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10055,6 +10073,7 @@ pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(bcax))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10070,6 +10089,7 @@ pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x 
#[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(bcax))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10085,6 +10105,7 @@ pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcadd))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10100,6 +10121,7 @@ pub unsafe fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcadd))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10115,6 +10137,7 @@ pub unsafe fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcadd))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10130,6 +10153,7 @@ pub unsafe fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcadd))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10145,6 +10169,7 @@ pub unsafe fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, 
assert_instr(fcadd))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10160,6 +10185,7 @@ pub unsafe fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcadd))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10175,6 +10201,7 @@ pub unsafe fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10190,6 +10217,7 @@ pub unsafe fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10205,6 +10233,7 @@ pub unsafe fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> floa #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10220,6 +10249,7 @@ pub unsafe fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> floa #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla))] +#[unstable(feature = 
"stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10235,6 +10265,7 @@ pub unsafe fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10250,6 +10281,7 @@ pub unsafe fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) - #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10265,6 +10297,7 @@ pub unsafe fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) - #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10280,6 +10313,7 @@ pub unsafe fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) - #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10295,6 +10329,7 @@ pub unsafe fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla))] +#[unstable(feature = 
"stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10310,6 +10345,7 @@ pub unsafe fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10325,6 +10361,7 @@ pub unsafe fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) - #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10340,6 +10377,7 @@ pub unsafe fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) #[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla))] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10356,6 +10394,7 @@ pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmla_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { static_assert!(LANE == 0); let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); @@ -10369,6 +10408,7 @@ pub unsafe fn vcmla_lane_f32(a: float32x2_t, b: float32x2_t, c: #[target_feature(enable = 
"neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmla_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { static_assert_uimm_bits!(LANE, 1); let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); @@ -10382,6 +10422,7 @@ pub unsafe fn vcmla_laneq_f32(a: float32x2_t, b: float32x2_t, c #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmlaq_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { static_assert!(LANE == 0); let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]); @@ -10395,6 +10436,7 @@ pub unsafe fn vcmlaq_lane_f32(a: float32x4_t, b: float32x4_t, c #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmlaq_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { static_assert_uimm_bits!(LANE, 1); let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]); @@ -10408,6 +10450,7 @@ pub unsafe fn vcmlaq_laneq_f32(a: float32x4_t, b: float32x4_t, #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmla_rot90_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { static_assert!(LANE == 0); let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); @@ -10421,6 +10464,7 @@ pub unsafe fn vcmla_rot90_lane_f32(a: float32x2_t, b: float32x2 
#[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmla_rot90_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { static_assert_uimm_bits!(LANE, 1); let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); @@ -10434,6 +10478,7 @@ pub unsafe fn vcmla_rot90_laneq_f32(a: float32x2_t, b: float32x #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmlaq_rot90_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { static_assert!(LANE == 0); let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]); @@ -10447,6 +10492,7 @@ pub unsafe fn vcmlaq_rot90_lane_f32(a: float32x4_t, b: float32x #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmlaq_rot90_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { static_assert_uimm_bits!(LANE, 1); let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]); @@ -10460,6 +10506,7 @@ pub unsafe fn vcmlaq_rot90_laneq_f32(a: float32x4_t, b: float32 #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmla_rot180_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { static_assert!(LANE == 0); let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); @@ -10473,6 +10520,7 @@ pub unsafe fn vcmla_rot180_lane_f32(a: 
float32x2_t, b: float32x #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmla_rot180_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { static_assert_uimm_bits!(LANE, 1); let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); @@ -10486,6 +10534,7 @@ pub unsafe fn vcmla_rot180_laneq_f32(a: float32x2_t, b: float32 #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmlaq_rot180_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { static_assert!(LANE == 0); let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]); @@ -10499,6 +10548,7 @@ pub unsafe fn vcmlaq_rot180_lane_f32(a: float32x4_t, b: float32 #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmlaq_rot180_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { static_assert_uimm_bits!(LANE, 1); let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]); @@ -10512,6 +10562,7 @@ pub unsafe fn vcmlaq_rot180_laneq_f32(a: float32x4_t, b: float3 #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmla_rot270_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { static_assert!(LANE == 0); let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); @@ -10525,6 +10576,7 @@ pub unsafe 
fn vcmla_rot270_lane_f32(a: float32x2_t, b: float32x #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmla_rot270_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { static_assert_uimm_bits!(LANE, 1); let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]); @@ -10538,6 +10590,7 @@ pub unsafe fn vcmla_rot270_laneq_f32(a: float32x2_t, b: float32 #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmlaq_rot270_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { static_assert!(LANE == 0); let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]); @@ -10551,6 +10604,7 @@ pub unsafe fn vcmlaq_rot270_lane_f32(a: float32x4_t, b: float32 #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_fcma", issue = "117222")] pub unsafe fn vcmlaq_rot270_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { static_assert_uimm_bits!(LANE, 1); let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]); @@ -10564,6 +10618,7 @@ pub unsafe fn vcmlaq_rot270_laneq_f32(a: float32x4_t, b: float3 #[target_feature(enable = "neon,dotprod")] #[cfg_attr(test, assert_instr(sdot, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] pub unsafe fn vdot_laneq_s32(a: int32x2_t, b: int8x8_t, c: int8x16_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 2); let c: int32x4_t = transmute(c); @@ -10578,6 +10633,7 @@ pub unsafe fn vdot_laneq_s32(a: 
int32x2_t, b: int8x8_t, c: int8 #[target_feature(enable = "neon,dotprod")] #[cfg_attr(test, assert_instr(sdot, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] pub unsafe fn vdotq_laneq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); let c: int32x4_t = transmute(c); @@ -10592,6 +10648,7 @@ pub unsafe fn vdotq_laneq_s32(a: int32x4_t, b: int8x16_t, c: in #[target_feature(enable = "neon,dotprod")] #[cfg_attr(test, assert_instr(udot, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] pub unsafe fn vdot_laneq_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x16_t) -> uint32x2_t { static_assert_uimm_bits!(LANE, 2); let c: uint32x4_t = transmute(c); @@ -10606,6 +10663,7 @@ pub unsafe fn vdot_laneq_u32(a: uint32x2_t, b: uint8x8_t, c: ui #[target_feature(enable = "neon,dotprod")] #[cfg_attr(test, assert_instr(udot, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] pub unsafe fn vdotq_laneq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 2); let c: uint32x4_t = transmute(c); @@ -15123,6 +15181,7 @@ pub unsafe fn vshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> ui #[inline] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm3partw1))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15138,6 +15197,7 @@ pub unsafe fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> ui #[inline] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm3partw2))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { 
#[allow(improper_ctypes)] extern "unadjusted" { @@ -15153,6 +15213,7 @@ pub unsafe fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> ui #[inline] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm3ss1))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15168,6 +15229,7 @@ pub unsafe fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint3 #[inline] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm4ekey))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15183,6 +15245,7 @@ pub unsafe fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm4e))] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15198,6 +15261,7 @@ pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(rax1))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15213,6 +15277,7 @@ pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(sha512h))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15228,6 +15293,7 @@ pub unsafe fn vsha512hq_u64(a: uint64x2_t, 
b: uint64x2_t, c: uint64x2_t) -> uint #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(sha512h2))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15243,6 +15309,7 @@ pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uin #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(sha512su0))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15258,6 +15325,7 @@ pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(sha512su1))] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15273,6 +15341,7 @@ pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> ui #[inline] #[target_feature(enable = "neon,frintts")] #[cfg_attr(test, assert_instr(frint32x))] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15288,6 +15357,7 @@ pub unsafe fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon,frintts")] #[cfg_attr(test, assert_instr(frint32x))] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15303,6 +15373,7 @@ pub unsafe fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon,frintts")] #[cfg_attr(test, 
assert_instr(frint32x))] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15318,6 +15389,7 @@ pub unsafe fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,frintts")] #[cfg_attr(test, assert_instr(frint32x))] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] pub unsafe fn vrnd32x_f64(a: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15333,6 +15405,7 @@ pub unsafe fn vrnd32x_f64(a: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon,frintts")] #[cfg_attr(test, assert_instr(frint32z))] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15348,6 +15421,7 @@ pub unsafe fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon,frintts")] #[cfg_attr(test, assert_instr(frint32z))] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15363,6 +15437,7 @@ pub unsafe fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon,frintts")] #[cfg_attr(test, assert_instr(frint32z))] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15378,6 +15453,7 @@ pub unsafe fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,frintts")] #[cfg_attr(test, assert_instr(frint32z))] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] pub unsafe fn vrnd32z_f64(a: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15393,6 +15469,7 @@ pub unsafe fn vrnd32z_f64(a: 
float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon,frintts")] #[cfg_attr(test, assert_instr(frint64x))] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15408,6 +15485,7 @@ pub unsafe fn vrnd64x_f32(a: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon,frintts")] #[cfg_attr(test, assert_instr(frint64x))] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15423,6 +15501,7 @@ pub unsafe fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon,frintts")] #[cfg_attr(test, assert_instr(frint64x))] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15438,6 +15517,7 @@ pub unsafe fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,frintts")] #[cfg_attr(test, assert_instr(frint64x))] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] pub unsafe fn vrnd64x_f64(a: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15453,6 +15533,7 @@ pub unsafe fn vrnd64x_f64(a: float64x1_t) -> float64x1_t { #[inline] #[target_feature(enable = "neon,frintts")] #[cfg_attr(test, assert_instr(frint64z))] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15468,6 +15549,7 @@ pub unsafe fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { #[inline] #[target_feature(enable = "neon,frintts")] #[cfg_attr(test, assert_instr(frint64z))] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { 
#[allow(improper_ctypes)] extern "unadjusted" { @@ -15483,6 +15565,7 @@ pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { #[inline] #[target_feature(enable = "neon,frintts")] #[cfg_attr(test, assert_instr(frint64z))] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15498,6 +15581,7 @@ pub unsafe fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { #[inline] #[target_feature(enable = "neon,frintts")] #[cfg_attr(test, assert_instr(frint64z))] +#[unstable(feature = "stdarch_neon_ftts", issue = "117227")] pub unsafe fn vrnd64z_f64(a: float64x1_t) -> float64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { diff --git a/crates/core_arch/src/aarch64/neon/mod.rs b/crates/core_arch/src/aarch64/neon/mod.rs index a79d9dc1d3..8ab7711d0b 100644 --- a/crates/core_arch/src/aarch64/neon/mod.rs +++ b/crates/core_arch/src/aarch64/neon/mod.rs @@ -5,6 +5,7 @@ #[rustfmt::skip] mod generated; #[rustfmt::skip] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] pub use self::generated::*; // FIXME: replace neon with asimd @@ -3452,6 +3453,7 @@ pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm3tt1aq_u32( a: uint32x4_t, b: uint32x4_t, @@ -3471,6 +3473,7 @@ pub unsafe fn vsm3tt1aq_u32( #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm3tt1bq_u32( a: uint32x4_t, b: uint32x4_t, @@ -3490,6 +3493,7 @@ pub unsafe fn vsm3tt1bq_u32( #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = 
"stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm3tt2aq_u32( a: uint32x4_t, b: uint32x4_t, @@ -3509,6 +3513,7 @@ pub unsafe fn vsm3tt2aq_u32( #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))] #[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_neon_sm4", issue = "117226")] pub unsafe fn vsm3tt2bq_u32( a: uint32x4_t, b: uint32x4_t, @@ -3528,6 +3533,7 @@ pub unsafe fn vsm3tt2bq_u32( #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(xar, IMM6 = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_neon_sha3", issue = "117225")] pub unsafe fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { static_assert_uimm_bits!(IMM6, 6); #[allow(improper_ctypes)] diff --git a/crates/core_arch/src/aarch64/prefetch.rs b/crates/core_arch/src/aarch64/prefetch.rs index 0e2e39cc2e..1fde2ac4df 100644 --- a/crates/core_arch/src/aarch64/prefetch.rs +++ b/crates/core_arch/src/aarch64/prefetch.rs @@ -7,21 +7,27 @@ extern "unadjusted" { } /// See [`prefetch`](fn._prefetch.html). +#[unstable(feature = "stdarch_aarch64_prefetch", issue = "117217")] pub const _PREFETCH_READ: i32 = 0; /// See [`prefetch`](fn._prefetch.html). +#[unstable(feature = "stdarch_aarch64_prefetch", issue = "117217")] pub const _PREFETCH_WRITE: i32 = 1; /// See [`prefetch`](fn._prefetch.html). +#[unstable(feature = "stdarch_aarch64_prefetch", issue = "117217")] pub const _PREFETCH_LOCALITY0: i32 = 0; /// See [`prefetch`](fn._prefetch.html). +#[unstable(feature = "stdarch_aarch64_prefetch", issue = "117217")] pub const _PREFETCH_LOCALITY1: i32 = 1; /// See [`prefetch`](fn._prefetch.html). +#[unstable(feature = "stdarch_aarch64_prefetch", issue = "117217")] pub const _PREFETCH_LOCALITY2: i32 = 2; /// See [`prefetch`](fn._prefetch.html). 
+#[unstable(feature = "stdarch_aarch64_prefetch", issue = "117217")] pub const _PREFETCH_LOCALITY3: i32 = 3; /// Fetch the cache line that contains address `p` using the given `RW` and `LOCALITY`. @@ -64,6 +70,7 @@ pub const _PREFETCH_LOCALITY3: i32 = 3; #[cfg_attr(test, assert_instr("prfm pstl2keep", RW = _PREFETCH_WRITE, LOCALITY = _PREFETCH_LOCALITY2))] #[cfg_attr(test, assert_instr("prfm pstl1keep", RW = _PREFETCH_WRITE, LOCALITY = _PREFETCH_LOCALITY3))] #[rustc_legacy_const_generics(1, 2)] +#[unstable(feature = "stdarch_aarch64_prefetch", issue = "117217")] // FIXME: Replace this with the standard ACLE __pld/__pldx/__pli/__plix intrinsics pub unsafe fn _prefetch(p: *const i8) { // We use the `llvm.prefetch` intrinsic with `cache type` = 1 (data cache). diff --git a/crates/core_arch/src/aarch64/tme.rs b/crates/core_arch/src/aarch64/tme.rs index 15f1b877d6..96432fa79f 100644 --- a/crates/core_arch/src/aarch64/tme.rs +++ b/crates/core_arch/src/aarch64/tme.rs @@ -29,39 +29,51 @@ extern "unadjusted" { } /// Transaction successfully started. +#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub const _TMSTART_SUCCESS: u64 = 0x00_u64; /// Extraction mask for failure reason +#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub const _TMFAILURE_REASON: u64 = 0x00007FFF_u64; /// Transaction retry is possible. 
+#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub const _TMFAILURE_RTRY: u64 = 1 << 15; /// Transaction executed a TCANCEL instruction +#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub const _TMFAILURE_CNCL: u64 = 1 << 16; /// Transaction aborted because a conflict occurred +#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub const _TMFAILURE_MEM: u64 = 1 << 17; /// Fallback error type for any other reason +#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub const _TMFAILURE_IMP: u64 = 1 << 18; /// Transaction aborted because a non-permissible operation was attempted +#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub const _TMFAILURE_ERR: u64 = 1 << 19; /// Transaction aborted due to read or write set limit was exceeded +#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub const _TMFAILURE_SIZE: u64 = 1 << 20; /// Transaction aborted due to transactional nesting level was exceeded +#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub const _TMFAILURE_NEST: u64 = 1 << 21; /// Transaction aborted due to a debug trap. +#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub const _TMFAILURE_DBG: u64 = 1 << 22; /// Transaction failed from interrupt +#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub const _TMFAILURE_INT: u64 = 1 << 23; /// Indicates a TRIVIAL version of TM is available +#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub const _TMFAILURE_TRIVIAL: u64 = 1 << 24; /// Starts a new transaction. When the transaction starts successfully the return value is 0. 
@@ -72,6 +84,7 @@ pub const _TMFAILURE_TRIVIAL: u64 = 1 << 24; #[inline] #[target_feature(enable = "tme")] #[cfg_attr(test, assert_instr(tstart))] +#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub unsafe fn __tstart() -> u64 { aarch64_tstart() } @@ -84,6 +97,7 @@ pub unsafe fn __tstart() -> u64 { #[inline] #[target_feature(enable = "tme")] #[cfg_attr(test, assert_instr(tcommit))] +#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub unsafe fn __tcommit() { aarch64_tcommit() } @@ -95,6 +109,7 @@ pub unsafe fn __tcommit() { #[target_feature(enable = "tme")] #[cfg_attr(test, assert_instr(tcancel, IMM16 = 0x0))] #[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub unsafe fn __tcancel() { static_assert!(IMM16 <= 65535); aarch64_tcancel(IMM16); @@ -107,6 +122,7 @@ pub unsafe fn __tcancel() { #[inline] #[target_feature(enable = "tme")] #[cfg_attr(test, assert_instr(ttest))] +#[unstable(feature = "stdarch_aarch64_tme", issue = "117216")] pub unsafe fn __ttest() -> u64 { aarch64_ttest() } diff --git a/crates/core_arch/src/arm/dsp.rs b/crates/core_arch/src/arm/dsp.rs index 6720f97a53..6d9c0138a1 100644 --- a/crates/core_arch/src/arm/dsp.rs +++ b/crates/core_arch/src/arm/dsp.rs @@ -27,8 +27,10 @@ use crate::mem::transmute; types! { /// ARM-specific 32-bit wide vector of two packed `i16`. + #[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub struct int16x2_t(i16, i16); /// ARM-specific 32-bit wide vector of two packed `u16`. + #[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub struct uint16x2_t(u16, u16); } @@ -82,6 +84,7 @@ extern "unadjusted" { /// where \[0\] is the lower 16 bits and \[1\] is the upper 16 bits. 
#[inline] #[cfg_attr(test, assert_instr(smulbb))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smulbb(a: int16x2_t, b: int16x2_t) -> i32 { arm_smulbb(transmute(a), transmute(b)) } @@ -92,6 +95,7 @@ pub unsafe fn __smulbb(a: int16x2_t, b: int16x2_t) -> i32 { /// where \[0\] is the lower 16 bits and \[1\] is the upper 16 bits. #[inline] #[cfg_attr(test, assert_instr(smultb))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smultb(a: int16x2_t, b: int16x2_t) -> i32 { arm_smultb(transmute(a), transmute(b)) } @@ -102,6 +106,7 @@ pub unsafe fn __smultb(a: int16x2_t, b: int16x2_t) -> i32 { /// where \[0\] is the lower 16 bits and \[1\] is the upper 16 bits. #[inline] #[cfg_attr(test, assert_instr(smulbt))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smulbt(a: int16x2_t, b: int16x2_t) -> i32 { arm_smulbt(transmute(a), transmute(b)) } @@ -112,6 +117,7 @@ pub unsafe fn __smulbt(a: int16x2_t, b: int16x2_t) -> i32 { /// where \[0\] is the lower 16 bits and \[1\] is the upper 16 bits. 
#[inline] #[cfg_attr(test, assert_instr(smultt))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smultt(a: int16x2_t, b: int16x2_t) -> i32 { arm_smultt(transmute(a), transmute(b)) } @@ -123,6 +129,7 @@ pub unsafe fn __smultt(a: int16x2_t, b: int16x2_t) -> i32 { /// Return the top 32 bits of the 48-bit product #[inline] #[cfg_attr(test, assert_instr(smulwb))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smulwb(a: int16x2_t, b: i32) -> i32 { arm_smulwb(transmute(a), b) } @@ -134,6 +141,7 @@ pub unsafe fn __smulwb(a: int16x2_t, b: i32) -> i32 { /// Return the top 32 bits of the 48-bit product #[inline] #[cfg_attr(test, assert_instr(smulwt))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smulwt(a: int16x2_t, b: i32) -> i32 { arm_smulwt(transmute(a), b) } @@ -144,6 +152,7 @@ pub unsafe fn __smulwt(a: int16x2_t, b: i32) -> i32 { /// Sets the Q flag if saturation occurs. #[inline] #[cfg_attr(test, assert_instr(qadd))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __qadd(a: i32, b: i32) -> i32 { arm_qadd(a, b) } @@ -154,6 +163,7 @@ pub unsafe fn __qadd(a: i32, b: i32) -> i32 { /// Sets the Q flag if saturation occurs. #[inline] #[cfg_attr(test, assert_instr(qsub))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __qsub(a: i32, b: i32) -> i32 { arm_qsub(a, b) } @@ -164,6 +174,7 @@ pub unsafe fn __qsub(a: i32, b: i32) -> i32 { /// Sets the Q flag if saturation occurs. #[inline] #[cfg_attr(test, assert_instr(qadd))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __qdbl(a: i32) -> i32 { arm_qadd(a, a) } @@ -175,6 +186,7 @@ pub unsafe fn __qdbl(a: i32) -> i32 { /// Sets the Q flag if overflow occurs on the addition. 
#[inline] #[cfg_attr(test, assert_instr(smlabb))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smlabb(a: int16x2_t, b: int16x2_t, c: i32) -> i32 { arm_smlabb(transmute(a), transmute(b), c) } @@ -186,6 +198,7 @@ pub unsafe fn __smlabb(a: int16x2_t, b: int16x2_t, c: i32) -> i32 { /// Sets the Q flag if overflow occurs on the addition. #[inline] #[cfg_attr(test, assert_instr(smlabt))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smlabt(a: int16x2_t, b: int16x2_t, c: i32) -> i32 { arm_smlabt(transmute(a), transmute(b), c) } @@ -197,6 +210,7 @@ pub unsafe fn __smlabt(a: int16x2_t, b: int16x2_t, c: i32) -> i32 { /// Sets the Q flag if overflow occurs on the addition. #[inline] #[cfg_attr(test, assert_instr(smlatb))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smlatb(a: int16x2_t, b: int16x2_t, c: i32) -> i32 { arm_smlatb(transmute(a), transmute(b), c) } @@ -208,6 +222,7 @@ pub unsafe fn __smlatb(a: int16x2_t, b: int16x2_t, c: i32) -> i32 { /// Sets the Q flag if overflow occurs on the addition. #[inline] #[cfg_attr(test, assert_instr(smlatt))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smlatt(a: int16x2_t, b: int16x2_t, c: i32) -> i32 { arm_smlatt(transmute(a), transmute(b), c) } @@ -219,6 +234,7 @@ pub unsafe fn __smlatt(a: int16x2_t, b: int16x2_t, c: i32) -> i32 { /// Sets the Q flag if overflow occurs on the addition. #[inline] #[cfg_attr(test, assert_instr(smlawb))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smlawb(a: i32, b: int16x2_t, c: i32) -> i32 { arm_smlawb(a, transmute(b), c) } @@ -230,6 +246,7 @@ pub unsafe fn __smlawb(a: i32, b: int16x2_t, c: i32) -> i32 { /// Sets the Q flag if overflow occurs on the addition. 
#[inline] #[cfg_attr(test, assert_instr(smlawt))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smlawt(a: i32, b: int16x2_t, c: i32) -> i32 { arm_smlawt(a, transmute(b), c) } diff --git a/crates/core_arch/src/arm/mod.rs b/crates/core_arch/src/arm/mod.rs index b6e0019d2a..217906c78d 100644 --- a/crates/core_arch/src/arm/mod.rs +++ b/crates/core_arch/src/arm/mod.rs @@ -11,6 +11,8 @@ mod sat; #[cfg(any(target_feature = "v6", doc))] +// Remove warning because this module is currently empty. +#[allow(unused_imports)] pub use self::sat::*; // Supported arches: 5TE, 7E-M. See Section 10.1 of ACLE (e.g. QADD) @@ -25,7 +27,7 @@ pub use self::sat::*; all(target_feature = "mclass", target_feature = "dsp"), doc, ))] -pub mod dsp; +mod dsp; #[cfg(any( // >= v5TE but excludes v7-M @@ -34,6 +36,7 @@ pub mod dsp; all(target_feature = "mclass", target_feature = "dsp"), doc, ))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub use self::dsp::*; // Deprecated in ACLE 2.0 for the A profile but fully supported on the M and R profiles, says @@ -54,8 +57,10 @@ mod simd32; all(target_feature = "mclass", target_feature = "dsp"), doc, ))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub use self::simd32::*; +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub use crate::core_arch::arm_shared::*; #[cfg(test)] @@ -67,36 +72,5 @@ use stdarch_test::assert_instr; pub(crate) mod neon; #[cfg(target_endian = "little")] #[cfg(any(target_feature = "v7", doc))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub use neon::*; - -/// Generates the trap instruction `UDF` -#[cfg(target_arch = "arm")] -#[cfg_attr(test, assert_instr(udf))] -#[inline] -pub unsafe fn udf() -> ! { - crate::intrinsics::abort() -} - -/// Generates a DBG instruction. -/// -/// This provides a hint to debugging and related systems. The argument must be -/// a constant integer from 0 to 15 inclusive. 
See implementation documentation -/// for the effect (if any) of this instruction and the meaning of the -/// argument. This is available only when compiling for AArch32. -// Section 10.1 of ACLE says that the supported arches are: 7, 7-M -// "The DBG hint instruction is added in ARMv7. It is UNDEFINED in the ARMv6 base architecture, and -// executes as a NOP instruction in ARMv6K and ARMv6T2." - ARM Architecture Reference Manual ARMv7-A -// and ARMv7-R edition (ARM DDI 0406C.c) sections D12.4.1 "ARM instruction set support" and D12.4.2 -// "Thumb instruction set support" -#[cfg(any(target_feature = "v7", doc))] -#[inline(always)] -#[rustc_legacy_const_generics(0)] -pub unsafe fn __dbg() { - static_assert_uimm_bits!(IMM4, 4); - dbg(IMM4); -} - -extern "unadjusted" { - #[link_name = "llvm.arm.dbg"] - fn dbg(_: i32); -} diff --git a/crates/core_arch/src/arm/neon.rs b/crates/core_arch/src/arm/neon.rs index 2e434a2898..b0a4f3706b 100644 --- a/crates/core_arch/src/arm/neon.rs +++ b/crates/core_arch/src/arm/neon.rs @@ -125,6 +125,7 @@ extern "unadjusted" { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.8"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { vld1_v8i8(ptr as *const i8, align_of::() as i32) } @@ -133,6 +134,7 @@ pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.8"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { vld1q_v16i8(ptr as *const i8, align_of::() as i32) } @@ -141,6 +143,7 @@ pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.16"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { vld1_v4i16(ptr as *const i8, 
align_of::() as i32) } @@ -149,6 +152,7 @@ pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.16"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { vld1q_v8i16(ptr as *const i8, align_of::() as i32) } @@ -157,6 +161,7 @@ pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vldr))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { vld1_v2i32(ptr as *const i8, align_of::() as i32) } @@ -165,6 +170,7 @@ pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.32"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { vld1q_v4i32(ptr as *const i8, align_of::() as i32) } @@ -173,6 +179,7 @@ pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vldr))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { vld1_v1i64(ptr as *const i8, align_of::() as i32) } @@ -181,6 +188,7 @@ pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.64"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { vld1q_v2i64(ptr as *const i8, align_of::() as i32) } @@ -189,6 +197,7 @@ pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.8"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn 
vld1_u8(ptr: *const u8) -> uint8x8_t { transmute(vld1_v8i8(ptr as *const i8, align_of::() as i32)) } @@ -197,6 +206,7 @@ pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.8"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { transmute(vld1q_v16i8(ptr as *const i8, align_of::() as i32)) } @@ -205,6 +215,7 @@ pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.16"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { transmute(vld1_v4i16(ptr as *const i8, align_of::() as i32)) } @@ -213,6 +224,7 @@ pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.16"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { transmute(vld1q_v8i16(ptr as *const i8, align_of::() as i32)) } @@ -221,6 +233,7 @@ pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vldr))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { transmute(vld1_v2i32(ptr as *const i8, align_of::() as i32)) } @@ -229,6 +242,7 @@ pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.32"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { transmute(vld1q_v4i32(ptr as *const i8, align_of::() as i32)) } @@ -237,6 +251,7 @@ pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { #[inline] #[target_feature(enable = 
"neon,v7")] #[cfg_attr(test, assert_instr(vldr))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { transmute(vld1_v1i64(ptr as *const i8, align_of::() as i32)) } @@ -245,6 +260,7 @@ pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.64"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { transmute(vld1q_v2i64(ptr as *const i8, align_of::() as i32)) } @@ -253,6 +269,7 @@ pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.8"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { transmute(vld1_v8i8(ptr as *const i8, align_of::() as i32)) } @@ -261,6 +278,7 @@ pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.8"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { transmute(vld1q_v16i8(ptr as *const i8, align_of::() as i32)) } @@ -269,6 +287,7 @@ pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.16"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { transmute(vld1_v4i16(ptr as *const i8, align_of::() as i32)) } @@ -277,6 +296,7 @@ pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.16"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { transmute(vld1q_v8i16(ptr as *const 
i8, align_of::() as i32)) } @@ -287,6 +307,7 @@ pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { #[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(vldr))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { transmute(vld1_v1i64(ptr as *const i8, align_of::() as i32)) } @@ -297,6 +318,7 @@ pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { #[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr("vld1.64"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { transmute(vld1q_v2i64(ptr as *const i8, align_of::() as i32)) } @@ -305,6 +327,7 @@ pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vldr))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { vld1_v2f32(ptr as *const i8, align_of::() as i32) } @@ -313,6 +336,7 @@ pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vld1.32"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { vld1q_v4f32(ptr as *const i8, align_of::() as i32) } @@ -321,6 +345,7 @@ pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.8"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { vst1_v8i8(ptr as *const i8, a, align_of::() as i32) } @@ -329,6 +354,7 @@ pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.8"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800")] pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { vst1q_v16i8(ptr as *const i8, a, align_of::() as i32) } @@ -337,6 +363,7 @@ pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.16"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { vst1_v4i16(ptr as *const i8, a, align_of::() as i32) } @@ -345,6 +372,7 @@ pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.16"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { vst1q_v8i16(ptr as *const i8, a, align_of::() as i32) } @@ -353,6 +381,7 @@ pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.32"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { vst1_v2i32(ptr as *const i8, a, align_of::() as i32) } @@ -361,6 +390,7 @@ pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.32"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { vst1q_v4i32(ptr as *const i8, a, align_of::() as i32) } @@ -369,6 +399,7 @@ pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.64"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { vst1_v1i64(ptr as *const i8, a, align_of::() as i32) } @@ -377,6 +408,7 @@ pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { #[inline] #[target_feature(enable = "neon,v7")] 
#[cfg_attr(test, assert_instr("vst1.64"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { vst1q_v2i64(ptr as *const i8, a, align_of::() as i32) } @@ -385,6 +417,7 @@ pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.8"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { vst1_v8i8(ptr as *const i8, transmute(a), align_of::() as i32) } @@ -393,6 +426,7 @@ pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.8"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { vst1q_v16i8(ptr as *const i8, transmute(a), align_of::() as i32) } @@ -401,6 +435,7 @@ pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.16"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { vst1_v4i16(ptr as *const i8, transmute(a), align_of::() as i32) } @@ -409,6 +444,7 @@ pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.16"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { vst1q_v8i16(ptr as *const i8, transmute(a), align_of::() as i32) } @@ -417,6 +453,7 @@ pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.32"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { vst1_v2i32(ptr as *const i8, transmute(a), 
align_of::() as i32) } @@ -425,6 +462,7 @@ pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.32"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { vst1q_v4i32(ptr as *const i8, transmute(a), align_of::() as i32) } @@ -433,6 +471,7 @@ pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.64"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { vst1_v1i64(ptr as *const i8, transmute(a), align_of::() as i32) } @@ -441,6 +480,7 @@ pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.64"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { vst1q_v2i64(ptr as *const i8, transmute(a), align_of::() as i32) } @@ -449,6 +489,7 @@ pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.8"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { vst1_v8i8(ptr as *const i8, transmute(a), align_of::() as i32) } @@ -457,6 +498,7 @@ pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.8"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { vst1q_v16i8(ptr as *const i8, transmute(a), align_of::() as i32) } @@ -465,6 +507,7 @@ pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.16"))] +#[unstable(feature 
= "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { vst1_v4i16(ptr as *const i8, transmute(a), align_of::() as i32) } @@ -473,6 +516,7 @@ pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.16"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { vst1q_v8i16(ptr as *const i8, transmute(a), align_of::() as i32) } @@ -483,6 +527,7 @@ pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { #[inline] #[target_feature(enable = "neon,aes,v8")] #[cfg_attr(test, assert_instr("vst1.64"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { vst1_v1i64(ptr as *const i8, transmute(a), align_of::() as i32) } @@ -493,6 +538,7 @@ pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { #[inline] #[target_feature(enable = "neon,aes,v8")] #[cfg_attr(test, assert_instr("vst1.64"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { vst1q_v2i64(ptr as *const i8, transmute(a), align_of::() as i32) } @@ -501,6 +547,7 @@ pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.32"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { vst1_v2f32(ptr as *const i8, a, align_of::() as i32) } @@ -509,6 +556,7 @@ pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vst1.32"))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { vst1q_v4f32(ptr as *const i8, a, align_of::() as i32) } @@ -517,6 +565,7 @@ pub unsafe fn 
vst1q_f32(ptr: *mut f32, a: float32x4_t) { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbl))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { vtbl1(a, b) } @@ -525,6 +574,7 @@ pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbl))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { transmute(vtbl1(transmute(a), transmute(b))) } @@ -533,6 +583,7 @@ pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbl))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { transmute(vtbl1(transmute(a), transmute(b))) } @@ -541,6 +592,7 @@ pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbl))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { vtbl2(a.0, a.1, b) } @@ -549,6 +601,7 @@ pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbl))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))) } @@ -557,6 +610,7 @@ pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbl))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> 
poly8x8_t { transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))) } @@ -565,6 +619,7 @@ pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbl))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { vtbl3(a.0, a.1, a.2, b) } @@ -573,6 +628,7 @@ pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbl))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { transmute(vtbl3( transmute(a.0), @@ -586,6 +642,7 @@ pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbl))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { transmute(vtbl3( transmute(a.0), @@ -599,6 +656,7 @@ pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbl))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { vtbl4(a.0, a.1, a.2, a.3, b) } @@ -607,6 +665,7 @@ pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbl))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { transmute(vtbl4( transmute(a.0), @@ -621,6 +680,7 @@ pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbl))] +#[unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { transmute(vtbl4( transmute(a.0), @@ -635,6 +695,7 @@ pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbx))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { vtbx1(a, b, c) } @@ -643,6 +704,7 @@ pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbx))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { transmute(vtbx1(transmute(a), transmute(b), transmute(c))) } @@ -651,6 +713,7 @@ pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbx))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { transmute(vtbx1(transmute(a), transmute(b), transmute(c))) } @@ -659,6 +722,7 @@ pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbx))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { vtbx2(a, b.0, b.1, c) } @@ -667,6 +731,7 @@ pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbx))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { transmute(vtbx2( 
transmute(a), @@ -680,6 +745,7 @@ pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbx))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { transmute(vtbx2( transmute(a), @@ -693,6 +759,7 @@ pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbx))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { vtbx3(a, b.0, b.1, b.2, c) } @@ -701,6 +768,7 @@ pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbx))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { transmute(vtbx3( transmute(a), @@ -715,6 +783,7 @@ pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbx))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { transmute(vtbx3( transmute(a), @@ -729,6 +798,7 @@ pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbx))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { vtbx4(a, b.0, b.1, b.2, b.3, c) } @@ -737,6 +807,7 @@ pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { #[inline] #[target_feature(enable = 
"neon,v7")] #[cfg_attr(test, assert_instr(vtbx))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { transmute(vtbx4( transmute(a), @@ -752,6 +823,7 @@ pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t #[inline] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vtbx))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { transmute(vtbx4( transmute(a), @@ -768,6 +840,7 @@ pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.8", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert_uimm_bits!(N, 3); let n = N as i8; @@ -778,6 +851,7 @@ pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.8", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert_uimm_bits!(N, 3); let n = N as i8; @@ -792,6 +866,7 @@ pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.16", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_uimm_bits!(N, 4); let n = N as i16; @@ -802,6 +877,7 @@ pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.16", N = 1))] 
#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(N, 4); let n = N as i16; @@ -812,6 +888,7 @@ pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.32", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert!(N >= 0 && N <= 31); vshiftins_v2i32(a, b, int32x2_t(N, N)) @@ -821,6 +898,7 @@ pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.32", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert!(N >= 0 && N <= 31); vshiftins_v4i32(a, b, int32x4_t(N, N, N, N)) @@ -830,6 +908,7 @@ pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { static_assert!(0 <= N && N <= 63); vshiftins_v1i64(a, b, int64x1_t(N as i64)) @@ -839,6 +918,7 @@ pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert!(0 <= N && N <= 63); vshiftins_v2i64(a, b, int64x2_t(N as i64, N as i64)) @@ -848,6 +928,7 @@ pub unsafe fn vsliq_n_s64(a: 
int64x2_t, b: int64x2_t) -> int64x2_t #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.8", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { static_assert_uimm_bits!(N, 3); let n = N as i8; @@ -862,6 +943,7 @@ pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.8", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { static_assert_uimm_bits!(N, 3); let n = N as i8; @@ -876,6 +958,7 @@ pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.16", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert_uimm_bits!(N, 4); let n = N as i16; @@ -890,6 +973,7 @@ pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.16", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { static_assert_uimm_bits!(N, 4); let n = N as i16; @@ -904,6 +988,7 @@ pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.32", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { static_assert!(N >= 0 && N <= 31); transmute(vshiftins_v2i32(transmute(a), transmute(b), 
int32x2_t(N, N))) @@ -913,6 +998,7 @@ pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.32", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { static_assert!(N >= 0 && N <= 31); transmute(vshiftins_v4i32( @@ -926,6 +1012,7 @@ pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { static_assert!(0 <= N && N <= 63); transmute(vshiftins_v1i64( @@ -939,6 +1026,7 @@ pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { static_assert!(0 <= N && N <= 63); transmute(vshiftins_v2i64( @@ -952,6 +1040,7 @@ pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.8", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { static_assert_uimm_bits!(N, 3); let n = N as i8; @@ -966,6 +1055,7 @@ pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.8", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> 
poly8x16_t { static_assert_uimm_bits!(N, 3); let n = N as i8; @@ -980,6 +1070,7 @@ pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16 #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.16", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { static_assert_uimm_bits!(N, 4); let n = N as i16; @@ -995,6 +1086,7 @@ pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4 #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsli.16", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { static_assert_uimm_bits!(N, 4); let n = N as i16; @@ -1012,6 +1104,7 @@ pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x #[target_feature(enable = "neon,v7,aes")] #[cfg_attr(test, assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { static_assert!(0 <= N && N <= 63); transmute(vshiftins_v1i64( @@ -1028,6 +1121,7 @@ pub unsafe fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1 #[target_feature(enable = "neon,v7,aes")] #[cfg_attr(test, assert_instr("vsli.64", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { static_assert!(0 <= N && N <= 63); transmute(vshiftins_v2i64( @@ -1041,6 +1135,7 @@ pub unsafe fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.8", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub 
unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert!(1 <= N && N <= 8); let n = -N as i8; @@ -1051,6 +1146,7 @@ pub unsafe fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.8", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert!(1 <= N && N <= 8); let n = -N as i8; @@ -1065,6 +1161,7 @@ pub unsafe fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert!(1 <= N && N <= 16); let n = -N as i16; @@ -1075,6 +1172,7 @@ pub unsafe fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert!(1 <= N && N <= 16); let n = -N as i16; @@ -1085,6 +1183,7 @@ pub unsafe fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.32", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert!(1 <= N && N <= 32); vshiftins_v2i32(a, b, int32x2_t(-N, -N)) @@ -1094,6 +1193,7 @@ pub unsafe fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.32", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert!(1 <= N && N <= 32); vshiftins_v4i32(a, b, int32x4_t(-N, -N, -N, -N)) @@ -1103,6 +1203,7 @@ pub unsafe fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.64", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { static_assert!(1 <= N && N <= 64); vshiftins_v1i64(a, b, int64x1_t(-N as i64)) @@ -1112,6 +1213,7 @@ pub unsafe fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.64", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert!(1 <= N && N <= 64); vshiftins_v2i64(a, b, int64x2_t(-N as i64, -N as i64)) @@ -1121,6 +1223,7 @@ pub unsafe fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.8", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { static_assert!(1 <= N && N <= 8); let n = -N as i8; @@ -1135,6 +1238,7 @@ pub unsafe fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.8", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { static_assert!(1 <= N && N <= 8); let n = -N as i8; @@ -1149,6 +1253,7 @@ pub unsafe fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16 #[target_feature(enable = 
"neon,v7")] #[cfg_attr(test, assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert!(1 <= N && N <= 16); let n = -N as i16; @@ -1163,6 +1268,7 @@ pub unsafe fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { static_assert!(1 <= N && N <= 16); let n = -N as i16; @@ -1177,6 +1283,7 @@ pub unsafe fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.32", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { static_assert!(1 <= N && N <= 32); transmute(vshiftins_v2i32( @@ -1190,6 +1297,7 @@ pub unsafe fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.32", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { static_assert!(1 <= N && N <= 32); transmute(vshiftins_v4i32( @@ -1203,6 +1311,7 @@ pub unsafe fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.64", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { static_assert!(1 <= N && N <= 64); transmute(vshiftins_v1i64( @@ -1216,6 +1325,7 @@ pub unsafe fn 
vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.64", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { static_assert!(1 <= N && N <= 64); transmute(vshiftins_v2i64( @@ -1229,6 +1339,7 @@ pub unsafe fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.8", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { static_assert!(1 <= N && N <= 8); let n = -N as i8; @@ -1243,6 +1354,7 @@ pub unsafe fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.8", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { static_assert!(1 <= N && N <= 8); let n = -N as i8; @@ -1257,6 +1369,7 @@ pub unsafe fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16 #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { static_assert!(1 <= N && N <= 16); let n = -N as i16; @@ -1271,6 +1384,7 @@ pub unsafe fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4 #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr("vsri.16", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { static_assert!(1 <= N && N <= 16); let n = -N as 
i16; @@ -1288,6 +1402,7 @@ pub unsafe fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x #[target_feature(enable = "neon,v7,aes")] #[cfg_attr(test, assert_instr("vsri.64", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsri_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { static_assert!(1 <= N && N <= 64); transmute(vshiftins_v1i64( @@ -1304,6 +1419,7 @@ pub unsafe fn vsri_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1 #[target_feature(enable = "neon,v7,aes")] #[cfg_attr(test, assert_instr("vsri.64", N = 1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { static_assert!(1 <= N && N <= 64); transmute(vshiftins_v2i64( diff --git a/crates/core_arch/src/arm/simd32.rs b/crates/core_arch/src/arm/simd32.rs index 2d867acc83..e81ad02a91 100644 --- a/crates/core_arch/src/arm/simd32.rs +++ b/crates/core_arch/src/arm/simd32.rs @@ -69,8 +69,10 @@ use crate::{core_arch::arm::dsp::int16x2_t, mem::transmute}; types! { /// ARM-specific 32-bit wide vector of four packed `i8`. + #[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub struct int8x4_t(i8, i8, i8, i8); /// ARM-specific 32-bit wide vector of four packed `u8`. 
+ #[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub struct uint8x4_t(u8, u8, u8, u8); } @@ -161,6 +163,7 @@ extern "unadjusted" { /// res\[3\] = a\[3\] + b\[3\] #[inline] #[cfg_attr(test, assert_instr(qadd8))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __qadd8(a: int8x4_t, b: int8x4_t) -> int8x4_t { dsp_call!(arm_qadd8, a, b) } @@ -175,6 +178,7 @@ pub unsafe fn __qadd8(a: int8x4_t, b: int8x4_t) -> int8x4_t { /// res\[3\] = a\[3\] - b\[3\] #[inline] #[cfg_attr(test, assert_instr(qsub8))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __qsub8(a: int8x4_t, b: int8x4_t) -> int8x4_t { dsp_call!(arm_qsub8, a, b) } @@ -187,6 +191,7 @@ pub unsafe fn __qsub8(a: int8x4_t, b: int8x4_t) -> int8x4_t { /// res\[1\] = a\[1\] - b\[1\] #[inline] #[cfg_attr(test, assert_instr(qsub16))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __qsub16(a: int16x2_t, b: int16x2_t) -> int16x2_t { dsp_call!(arm_qsub16, a, b) } @@ -199,6 +204,7 @@ pub unsafe fn __qsub16(a: int16x2_t, b: int16x2_t) -> int16x2_t { /// res\[1\] = a\[1\] + b\[1\] #[inline] #[cfg_attr(test, assert_instr(qadd16))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __qadd16(a: int16x2_t, b: int16x2_t) -> int16x2_t { dsp_call!(arm_qadd16, a, b) } @@ -209,6 +215,7 @@ pub unsafe fn __qadd16(a: int16x2_t, b: int16x2_t) -> int16x2_t { /// res\[1\] = a\[1\] + b\[0\] #[inline] #[cfg_attr(test, assert_instr(qasx))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __qasx(a: int16x2_t, b: int16x2_t) -> int16x2_t { dsp_call!(arm_qasx, a, b) } @@ -219,6 +226,7 @@ pub unsafe fn __qasx(a: int16x2_t, b: int16x2_t) -> int16x2_t { /// res\[1\] = a\[1\] - b\[0\] #[inline] #[cfg_attr(test, assert_instr(qsax))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __qsax(a: int16x2_t, b: int16x2_t) -> int16x2_t { dsp_call!(arm_qsax, a, b) } @@ -231,6 +239,7 @@ pub unsafe fn 
__qsax(a: int16x2_t, b: int16x2_t) -> int16x2_t { /// and the GE bits of the APSR are set. #[inline] #[cfg_attr(test, assert_instr(sadd16))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __sadd16(a: int16x2_t, b: int16x2_t) -> int16x2_t { dsp_call!(arm_sadd16, a, b) } @@ -245,6 +254,7 @@ pub unsafe fn __sadd16(a: int16x2_t, b: int16x2_t) -> int16x2_t { /// and the GE bits of the APSR are set. #[inline] #[cfg_attr(test, assert_instr(sadd8))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __sadd8(a: int8x4_t, b: int8x4_t) -> int8x4_t { dsp_call!(arm_sadd8, a, b) } @@ -256,6 +266,7 @@ pub unsafe fn __sadd8(a: int8x4_t, b: int8x4_t) -> int8x4_t { /// res = a\[0\] * b\[0\] + a\[1\] * b\[1\] + c #[inline] #[cfg_attr(test, assert_instr(smlad))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smlad(a: int16x2_t, b: int16x2_t, c: i32) -> i32 { arm_smlad(transmute(a), transmute(b), c) } @@ -267,6 +278,7 @@ pub unsafe fn __smlad(a: int16x2_t, b: int16x2_t, c: i32) -> i32 { /// res = a\[0\] * b\[0\] - a\[1\] * b\[1\] + c #[inline] #[cfg_attr(test, assert_instr(smlsd))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smlsd(a: int16x2_t, b: int16x2_t, c: i32) -> i32 { arm_smlsd(transmute(a), transmute(b), c) } @@ -279,6 +291,7 @@ pub unsafe fn __smlsd(a: int16x2_t, b: int16x2_t, c: i32) -> i32 { /// and the GE bits of the APSR are set. 
#[inline] #[cfg_attr(test, assert_instr(sasx))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __sasx(a: int16x2_t, b: int16x2_t) -> int16x2_t { dsp_call!(arm_sasx, a, b) } @@ -295,6 +308,7 @@ pub unsafe fn __sasx(a: int16x2_t, b: int16x2_t) -> int16x2_t { /// where GE are bits of APSR #[inline] #[cfg_attr(test, assert_instr(sel))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __sel(a: int8x4_t, b: int8x4_t) -> int8x4_t { dsp_call!(arm_sel, a, b) } @@ -309,6 +323,7 @@ pub unsafe fn __sel(a: int8x4_t, b: int8x4_t) -> int8x4_t { /// res\[3\] = (a\[3\] + b\[3\]) / 2 #[inline] #[cfg_attr(test, assert_instr(shadd8))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __shadd8(a: int8x4_t, b: int8x4_t) -> int8x4_t { dsp_call!(arm_shadd8, a, b) } @@ -321,6 +336,7 @@ pub unsafe fn __shadd8(a: int8x4_t, b: int8x4_t) -> int8x4_t { /// res\[1\] = (a\[1\] + b\[1\]) / 2 #[inline] #[cfg_attr(test, assert_instr(shadd16))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __shadd16(a: int16x2_t, b: int16x2_t) -> int16x2_t { dsp_call!(arm_shadd16, a, b) } @@ -335,6 +351,7 @@ pub unsafe fn __shadd16(a: int16x2_t, b: int16x2_t) -> int16x2_t { /// res\[3\] = (a\[3\] - b\[3\]) / 2 #[inline] #[cfg_attr(test, assert_instr(shsub8))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __shsub8(a: int8x4_t, b: int8x4_t) -> int8x4_t { dsp_call!(arm_shsub8, a, b) } @@ -352,6 +369,7 @@ pub unsafe fn __shsub8(a: int8x4_t, b: int8x4_t) -> int8x4_t { /// The GE bits of the APSR are set. #[inline] #[cfg_attr(test, assert_instr(usub8))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __usub8(a: uint8x4_t, b: uint8x4_t) -> uint8x4_t { dsp_call!(arm_usub8, a, b) } @@ -369,6 +387,7 @@ pub unsafe fn __usub8(a: uint8x4_t, b: uint8x4_t) -> uint8x4_t { /// The GE bits of the APSR are set. 
#[inline] #[cfg_attr(test, assert_instr(ssub8))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __ssub8(a: int8x4_t, b: int8x4_t) -> int8x4_t { dsp_call!(arm_ssub8, a, b) } @@ -381,6 +400,7 @@ pub unsafe fn __ssub8(a: int8x4_t, b: int8x4_t) -> int8x4_t { /// res\[1\] = (a\[1\] - b\[1\]) / 2 #[inline] #[cfg_attr(test, assert_instr(shsub16))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __shsub16(a: int16x2_t, b: int16x2_t) -> int16x2_t { dsp_call!(arm_shsub16, a, b) } @@ -394,6 +414,7 @@ pub unsafe fn __shsub16(a: int16x2_t, b: int16x2_t) -> int16x2_t { /// and sets the Q flag if overflow occurs on the addition. #[inline] #[cfg_attr(test, assert_instr(smuad))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smuad(a: int16x2_t, b: int16x2_t) -> i32 { arm_smuad(transmute(a), transmute(b)) } @@ -407,6 +428,7 @@ pub unsafe fn __smuad(a: int16x2_t, b: int16x2_t) -> i32 { /// and sets the Q flag if overflow occurs on the addition. #[inline] #[cfg_attr(test, assert_instr(smuadx))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smuadx(a: int16x2_t, b: int16x2_t) -> i32 { arm_smuadx(transmute(a), transmute(b)) } @@ -420,6 +442,7 @@ pub unsafe fn __smuadx(a: int16x2_t, b: int16x2_t) -> i32 { /// and sets the Q flag if overflow occurs on the addition. #[inline] #[cfg_attr(test, assert_instr(smusd))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smusd(a: int16x2_t, b: int16x2_t) -> i32 { arm_smusd(transmute(a), transmute(b)) } @@ -433,6 +456,7 @@ pub unsafe fn __smusd(a: int16x2_t, b: int16x2_t) -> i32 { /// and sets the Q flag if overflow occurs on the addition. 
#[inline] #[cfg_attr(test, assert_instr(smusdx))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __smusdx(a: int16x2_t, b: int16x2_t) -> i32 { arm_smusdx(transmute(a), transmute(b)) } @@ -445,6 +469,7 @@ pub unsafe fn __smusdx(a: int16x2_t, b: int16x2_t) -> i32 { /// (a\[2\] - b\[2\]) + (a\[3\] - b\[3\]) #[inline] #[cfg_attr(test, assert_instr(usad8))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __usad8(a: int8x4_t, b: int8x4_t) -> u32 { arm_usad8(transmute(a), transmute(b)) } @@ -457,6 +482,7 @@ pub unsafe fn __usad8(a: int8x4_t, b: int8x4_t) -> u32 { /// (a\[2\] - b\[2\]) + (a\[3\] - b\[3\]) + c #[inline] #[cfg_attr(test, assert_instr(usad8))] +#[unstable(feature = "stdarch_arm_dsp", issue = "117237")] pub unsafe fn __usada8(a: int8x4_t, b: int8x4_t, c: u32) -> u32 { __usad8(a, b) + c } diff --git a/crates/core_arch/src/arm_shared/barrier/common.rs b/crates/core_arch/src/arm_shared/barrier/common.rs index 0fb35534d1..476a07ffae 100644 --- a/crates/core_arch/src/arm_shared/barrier/common.rs +++ b/crates/core_arch/src/arm_shared/barrier/common.rs @@ -2,10 +2,12 @@ /// Full system is the required shareability domain, reads and writes are the /// required access types +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub struct SY; dmb_dsb!(SY); +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] impl super::super::sealed::Isb for SY { #[inline(always)] unsafe fn __isb(&self) { diff --git a/crates/core_arch/src/arm_shared/barrier/cp15.rs b/crates/core_arch/src/arm_shared/barrier/cp15.rs index fe540a7d8d..ae9ce3c005 100644 --- a/crates/core_arch/src/arm_shared/barrier/cp15.rs +++ b/crates/core_arch/src/arm_shared/barrier/cp15.rs @@ -5,8 +5,10 @@ use crate::arch::asm; /// Full system is the required shareability domain, reads and writes are the /// required access types +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub struct SY; +#[unstable(feature = 
"stdarch_arm_barrier", issue = "117219")] impl super::super::sealed::Dmb for SY { #[inline(always)] unsafe fn __dmb(&self) { @@ -18,6 +20,7 @@ impl super::super::sealed::Dmb for SY { } } +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] impl super::super::sealed::Dsb for SY { #[inline(always)] unsafe fn __dsb(&self) { @@ -29,6 +32,7 @@ impl super::super::sealed::Dsb for SY { } } +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] impl super::super::sealed::Isb for SY { #[inline(always)] unsafe fn __isb(&self) { diff --git a/crates/core_arch/src/arm_shared/barrier/mod.rs b/crates/core_arch/src/arm_shared/barrier/mod.rs index 6ccced00e3..fda42024a1 100644 --- a/crates/core_arch/src/arm_shared/barrier/mod.rs +++ b/crates/core_arch/src/arm_shared/barrier/mod.rs @@ -16,6 +16,7 @@ mod cp15; target_feature = "v7", target_feature = "mclass" )))] +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub use self::cp15::*; // Dedicated instructions @@ -26,6 +27,7 @@ pub use self::cp15::*; ))] macro_rules! dmb_dsb { ($A:ident) => { + #[unstable(feature = "stdarch_arm_barrier", issue = "117219")] impl super::super::sealed::Dmb for $A { #[inline(always)] unsafe fn __dmb(&self) { @@ -33,6 +35,7 @@ macro_rules! 
dmb_dsb { } } + #[unstable(feature = "stdarch_arm_barrier", issue = "117219")] impl super::super::sealed::Dsb for $A { #[inline(always)] unsafe fn __dsb(&self) { @@ -54,18 +57,21 @@ mod common; target_feature = "v7", target_feature = "mclass" ))] +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub use self::common::*; #[cfg(any(target_arch = "aarch64", target_feature = "v7",))] mod not_mclass; #[cfg(any(target_arch = "aarch64", target_feature = "v7",))] +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub use self::not_mclass::*; #[cfg(target_arch = "aarch64")] mod v8; #[cfg(target_arch = "aarch64")] +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub use self::v8::*; /// Generates a DMB (data memory barrier) instruction or equivalent CP15 instruction. @@ -79,6 +85,7 @@ pub use self::v8::*; /// /// The __dmb() intrinsic also acts as a compiler memory barrier of the appropriate type. #[inline(always)] +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub unsafe fn __dmb(arg: A) where A: super::sealed::Dmb, @@ -94,6 +101,7 @@ where /// /// The __dsb() intrinsic also acts as a compiler memory barrier of the appropriate type. #[inline(always)] +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub unsafe fn __dsb(arg: A) where A: super::sealed::Dsb, @@ -115,6 +123,7 @@ where /// The only supported argument for the __isb() intrinsic is 15, corresponding to the SY (full /// system) scope of the ISB instruction. 
#[inline(always)] +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub unsafe fn __isb(arg: A) where A: super::sealed::Isb, diff --git a/crates/core_arch/src/arm_shared/barrier/not_mclass.rs b/crates/core_arch/src/arm_shared/barrier/not_mclass.rs index 385e1d5289..3b941b2715 100644 --- a/crates/core_arch/src/arm_shared/barrier/not_mclass.rs +++ b/crates/core_arch/src/arm_shared/barrier/not_mclass.rs @@ -2,42 +2,49 @@ /// Full system is the required shareability domain, writes are the required /// access type +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub struct ST; dmb_dsb!(ST); /// Inner Shareable is the required shareability domain, reads and writes are /// the required access types +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub struct ISH; dmb_dsb!(ISH); /// Inner Shareable is the required shareability domain, writes are the required /// access type +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub struct ISHST; dmb_dsb!(ISHST); /// Non-shareable is the required shareability domain, reads and writes are the /// required access types +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub struct NSH; dmb_dsb!(NSH); /// Non-shareable is the required shareability domain, writes are the required /// access type +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub struct NSHST; dmb_dsb!(NSHST); /// Outer Shareable is the required shareability domain, reads and writes are /// the required access types +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub struct OSH; dmb_dsb!(OSH); /// Outer Shareable is the required shareability domain, writes are the required /// access type +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub struct OSHST; dmb_dsb!(OSHST); diff --git a/crates/core_arch/src/arm_shared/barrier/v8.rs b/crates/core_arch/src/arm_shared/barrier/v8.rs index db15da805d..5bf757f9f7 100644 --- 
a/crates/core_arch/src/arm_shared/barrier/v8.rs +++ b/crates/core_arch/src/arm_shared/barrier/v8.rs @@ -1,23 +1,27 @@ /// Full system is the required shareability domain, reads are the required /// access type +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub struct LD; dmb_dsb!(LD); /// Inner Shareable is the required shareability domain, reads are the required /// access type +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub struct ISHLD; dmb_dsb!(ISHLD); /// Non-shareable is the required shareability domain, reads are the required /// access type +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub struct NSHLD; dmb_dsb!(NSHLD); /// Outer Shareable is the required shareability domain, reads are the required /// access type +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub struct OSHLD; dmb_dsb!(OSHLD); diff --git a/crates/core_arch/src/arm_shared/crc.rs b/crates/core_arch/src/arm_shared/crc.rs index 779d1ed426..b1f716e1aa 100644 --- a/crates/core_arch/src/arm_shared/crc.rs +++ b/crates/core_arch/src/arm_shared/crc.rs @@ -30,6 +30,7 @@ use stdarch_test::assert_instr; #[target_feature(enable = "crc")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(crc32b))] +#[unstable(feature = "stdarch_arm_crc32", issue = "117215")] pub unsafe fn __crc32b(crc: u32, data: u8) -> u32 { crc32b_(crc, data as u32) } @@ -41,6 +42,7 @@ pub unsafe fn __crc32b(crc: u32, data: u8) -> u32 { #[target_feature(enable = "crc")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(crc32h))] +#[unstable(feature = "stdarch_arm_crc32", issue = "117215")] pub unsafe fn __crc32h(crc: u32, data: u16) -> u32 { crc32h_(crc, data as u32) } @@ -52,6 +54,7 @@ pub unsafe fn __crc32h(crc: u32, data: u16) -> u32 { #[target_feature(enable = "crc")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(crc32w))] 
+#[unstable(feature = "stdarch_arm_crc32", issue = "117215")] pub unsafe fn __crc32w(crc: u32, data: u32) -> u32 { crc32w_(crc, data) } @@ -63,6 +66,7 @@ pub unsafe fn __crc32w(crc: u32, data: u32) -> u32 { #[target_feature(enable = "crc")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(crc32cb))] +#[unstable(feature = "stdarch_arm_crc32", issue = "117215")] pub unsafe fn __crc32cb(crc: u32, data: u8) -> u32 { crc32cb_(crc, data as u32) } @@ -74,6 +78,7 @@ pub unsafe fn __crc32cb(crc: u32, data: u8) -> u32 { #[target_feature(enable = "crc")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(crc32ch))] +#[unstable(feature = "stdarch_arm_crc32", issue = "117215")] pub unsafe fn __crc32ch(crc: u32, data: u16) -> u32 { crc32ch_(crc, data as u32) } @@ -85,6 +90,7 @@ pub unsafe fn __crc32ch(crc: u32, data: u16) -> u32 { #[target_feature(enable = "crc")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(crc32cw))] +#[unstable(feature = "stdarch_arm_crc32", issue = "117215")] pub unsafe fn __crc32cw(crc: u32, data: u32) -> u32 { crc32cw_(crc, data) } diff --git a/crates/core_arch/src/arm_shared/crypto.rs b/crates/core_arch/src/arm_shared/crypto.rs index 1d56ed6067..7ca84f1a8f 100644 --- a/crates/core_arch/src/arm_shared/crypto.rs +++ b/crates/core_arch/src/arm_shared/crypto.rs @@ -58,12 +58,13 @@ use stdarch_test::assert_instr; #[target_feature(enable = "aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(aese))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] #[cfg_attr( not(target_arch = "arm"), - stable( - feature = "aarch64_neon_crypto_intrinsics", - since = "CURRENT_RUSTC_VERSION" - ) + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> 
uint8x16_t { vaeseq_u8_(data, key) @@ -76,12 +77,13 @@ pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { #[target_feature(enable = "aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(aesd))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] #[cfg_attr( not(target_arch = "arm"), - stable( - feature = "aarch64_neon_crypto_intrinsics", - since = "CURRENT_RUSTC_VERSION" - ) + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { vaesdq_u8_(data, key) @@ -94,12 +96,13 @@ pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { #[target_feature(enable = "aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(aesmc))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] #[cfg_attr( not(target_arch = "arm"), - stable( - feature = "aarch64_neon_crypto_intrinsics", - since = "CURRENT_RUSTC_VERSION" - ) + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { vaesmcq_u8_(data) @@ -112,12 +115,13 @@ pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { #[target_feature(enable = "aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(aesimc))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] #[cfg_attr( not(target_arch = "arm"), - stable( - feature = "aarch64_neon_crypto_intrinsics", - since = "CURRENT_RUSTC_VERSION" - ) + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { vaesimcq_u8_(data) @@ -130,12 +134,13 @@ pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { 
#[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha1h))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] #[cfg_attr( not(target_arch = "arm"), - stable( - feature = "aarch64_neon_crypto_intrinsics", - since = "CURRENT_RUSTC_VERSION" - ) + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 { vsha1h_u32_(hash_e) @@ -148,12 +153,13 @@ pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 { #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha1c))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] #[cfg_attr( not(target_arch = "arm"), - stable( - feature = "aarch64_neon_crypto_intrinsics", - since = "CURRENT_RUSTC_VERSION" - ) + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { vsha1cq_u32_(hash_abcd, hash_e, wk) @@ -166,12 +172,13 @@ pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha1m))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] #[cfg_attr( not(target_arch = "arm"), - stable( - feature = "aarch64_neon_crypto_intrinsics", - since = "CURRENT_RUSTC_VERSION" - ) + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { vsha1mq_u32_(hash_abcd, hash_e, wk) @@ -184,12 +191,13 @@ pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> #[target_feature(enable = "sha2")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha1p))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] #[cfg_attr( not(target_arch = "arm"), - stable( - feature = "aarch64_neon_crypto_intrinsics", - since = "CURRENT_RUSTC_VERSION" - ) + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t { vsha1pq_u32_(hash_abcd, hash_e, wk) @@ -202,12 +210,13 @@ pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha1su0))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] #[cfg_attr( not(target_arch = "arm"), - stable( - feature = "aarch64_neon_crypto_intrinsics", - since = "CURRENT_RUSTC_VERSION" - ) + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t { vsha1su0q_u32_(w0_3, w4_7, w8_11) @@ -220,12 +229,13 @@ pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_ #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha1su1))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] #[cfg_attr( not(target_arch = "arm"), - stable( - feature = "aarch64_neon_crypto_intrinsics", - since = "CURRENT_RUSTC_VERSION" - ) + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t { vsha1su1q_u32_(tw0_3, w12_15) @@ -238,12 +248,13 @@ pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: 
uint32x4_t) -> uint32x4_t #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha256h))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] #[cfg_attr( not(target_arch = "arm"), - stable( - feature = "aarch64_neon_crypto_intrinsics", - since = "CURRENT_RUSTC_VERSION" - ) + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha256hq_u32( hash_abcd: uint32x4_t, @@ -260,12 +271,13 @@ pub unsafe fn vsha256hq_u32( #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha256h2))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] #[cfg_attr( not(target_arch = "arm"), - stable( - feature = "aarch64_neon_crypto_intrinsics", - since = "CURRENT_RUSTC_VERSION" - ) + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha256h2q_u32( hash_efgh: uint32x4_t, @@ -282,12 +294,13 @@ pub unsafe fn vsha256h2q_u32( #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha256su0))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] #[cfg_attr( not(target_arch = "arm"), - stable( - feature = "aarch64_neon_crypto_intrinsics", - since = "CURRENT_RUSTC_VERSION" - ) + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t { vsha256su0q_u32_(w0_3, w4_7) @@ -300,12 +313,13 @@ pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha256su1))] +#[cfg_attr( + target_arch = "arm", + 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] #[cfg_attr( not(target_arch = "arm"), - stable( - feature = "aarch64_neon_crypto_intrinsics", - since = "CURRENT_RUSTC_VERSION" - ) + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") )] pub unsafe fn vsha256su1q_u32( tw0_3: uint32x4_t, diff --git a/crates/core_arch/src/arm_shared/hints.rs b/crates/core_arch/src/arm_shared/hints.rs index d7e43f5517..bd0416fc30 100644 --- a/crates/core_arch/src/arm_shared/hints.rs +++ b/crates/core_arch/src/arm_shared/hints.rs @@ -11,6 +11,7 @@ // LLVM says "instruction requires: armv6k" #[cfg(any(target_feature = "v6", target_arch = "aarch64", doc))] #[inline(always)] +#[unstable(feature = "stdarch_arm_hints", issue = "117218")] pub unsafe fn __wfi() { hint(HINT_WFI); } @@ -24,6 +25,7 @@ pub unsafe fn __wfi() { // LLVM says "instruction requires: armv6k" #[cfg(any(target_feature = "v6", target_arch = "aarch64", doc))] #[inline(always)] +#[unstable(feature = "stdarch_arm_hints", issue = "117218")] pub unsafe fn __wfe() { hint(HINT_WFE); } @@ -36,6 +38,7 @@ pub unsafe fn __wfe() { // LLVM says "instruction requires: armv6k" #[cfg(any(target_feature = "v6", target_arch = "aarch64", doc))] #[inline(always)] +#[unstable(feature = "stdarch_arm_hints", issue = "117218")] pub unsafe fn __sev() { hint(HINT_SEV); } @@ -52,6 +55,7 @@ pub unsafe fn __sev() { doc, ))] #[inline(always)] +#[unstable(feature = "stdarch_arm_hints", issue = "117218")] pub unsafe fn __sevl() { hint(HINT_SEVL); } @@ -65,6 +69,7 @@ pub unsafe fn __sevl() { // LLVM says "instruction requires: armv6k" #[cfg(any(target_feature = "v6", target_arch = "aarch64", doc))] #[inline(always)] +#[unstable(feature = "stdarch_arm_hints", issue = "117218")] pub unsafe fn __yield() { hint(HINT_YIELD); } @@ -76,6 +81,7 @@ pub unsafe fn __yield() { /// another instruction. It is not guaranteed that inserting this instruction /// will increase execution time. 
#[inline(always)] +#[unstable(feature = "stdarch_arm_hints", issue = "117218")] pub unsafe fn __nop() { crate::arch::asm!("nop", options(nomem, nostack, preserves_flags)); } diff --git a/crates/core_arch/src/arm_shared/mod.rs b/crates/core_arch/src/arm_shared/mod.rs index 8e2fcf03cf..ca5b3a6465 100644 --- a/crates/core_arch/src/arm_shared/mod.rs +++ b/crates/core_arch/src/arm_shared/mod.rs @@ -53,15 +53,15 @@ // 8, 7 and 6-M are supported via dedicated instructions like DMB. All other arches are supported // via CP15 instructions. See Section 10.1 of ACLE mod barrier; - +#[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub use self::barrier::*; mod hints; +#[unstable(feature = "stdarch_arm_hints", issue = "117218")] pub use self::hints::*; -#[cfg(any(target_arch = "aarch64", target_feature = "v7", doc))] mod crc; -#[cfg(any(target_arch = "aarch64", target_feature = "v7", doc))] +#[unstable(feature = "stdarch_arm_crc32", issue = "117215")] pub use crc::*; // NEON intrinsics are currently broken on big-endian, so don't expose them. (#1484) @@ -71,6 +71,14 @@ mod crypto; // NEON intrinsics are currently broken on big-endian, so don't expose them. (#1484) #[cfg(target_endian = "little")] #[cfg(any(target_arch = "aarch64", target_feature = "v7", doc))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0") +)] pub use self::crypto::*; // NEON intrinsics are currently broken on big-endian, so don't expose them. 
(#1484) @@ -79,6 +87,14 @@ pub use self::crypto::*; pub(crate) mod neon; #[cfg(target_endian = "little")] #[cfg(any(target_arch = "aarch64", target_feature = "v7", doc))] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub use self::neon::*; #[cfg(test)] @@ -86,39 +102,18 @@ pub use self::neon::*; pub(crate) mod test_support; mod sealed { + #[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub trait Dmb { unsafe fn __dmb(&self); } + #[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub trait Dsb { unsafe fn __dsb(&self); } + #[unstable(feature = "stdarch_arm_barrier", issue = "117219")] pub trait Isb { unsafe fn __isb(&self); } - - pub trait Rsr { - unsafe fn __rsr(&self) -> u32; - } - - pub trait Rsr64 { - unsafe fn __rsr64(&self) -> u64; - } - - pub trait Rsrp { - unsafe fn __rsrp(&self) -> *const u8; - } - - pub trait Wsr { - unsafe fn __wsr(&self, value: u32); - } - - pub trait Wsr64 { - unsafe fn __wsr64(&self, value: u64); - } - - pub trait Wsrp { - unsafe fn __wsrp(&self, value: *const u8); - } } diff --git a/crates/core_arch/src/arm_shared/neon/generated.rs b/crates/core_arch/src/arm_shared/neon/generated.rs index 34dc3a3342..46ae37c54e 100644 --- a/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/crates/core_arch/src/arm_shared/neon/generated.rs @@ -18,6 +18,7 @@ use stdarch_test::assert_instr; #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_and(a, b) } @@ -31,6 +32,7 @@ pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> 
int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_and(a, b) } @@ -44,6 +46,7 @@ pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_and(a, b) } @@ -57,6 +60,7 @@ pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_and(a, b) } @@ -70,6 +74,7 @@ pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_and(a, b) } @@ -83,6 +88,7 @@ pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_and(a, b) } @@ -96,6 +102,7 @@ pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_and(a, b) } @@ -109,6 +116,7 @@ pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_and(a, b) } @@ -122,6 +130,7 @@ pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_and(a, b) } @@ -135,6 +144,7 @@ pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_and(a, b) } @@ -148,6 +158,7 @@ pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_and(a, b) } @@ -161,6 +172,7 @@ pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_and(a, b) } @@ -174,6 +186,7 @@ pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_and(a, b) } @@ -187,6 +200,7 @@ pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> 
int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_and(a, b) } @@ -200,6 +214,7 @@ pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_and(a, b) } @@ -213,6 +228,7 @@ pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_and(a, b) } @@ -226,6 +242,7 @@ pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_or(a, b) } @@ -239,6 +256,7 @@ pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> 
int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_or(a, b) } @@ -252,6 +270,7 @@ pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_or(a, b) } @@ -265,6 +284,7 @@ pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_or(a, b) } @@ -278,6 +298,7 @@ pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_or(a, b) } @@ -291,6 +312,7 @@ pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_or(a, b) } @@ -304,6 +326,7 @@ pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_or(a, b) } @@ -317,6 +340,7 @@ pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_or(a, b) } @@ -330,6 +354,7 @@ pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_or(a, b) } @@ -343,6 +368,7 @@ pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_or(a, b) } @@ -356,6 +382,7 @@ pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_or(a, b) } @@ -369,6 +396,7 @@ pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_or(a, b) } @@ -382,6 +410,7 @@ pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_or(a, b) } @@ -395,6 +424,7 @@ pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t 
{ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_or(a, b) } @@ -408,6 +438,7 @@ pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_or(a, b) } @@ -421,6 +452,7 @@ pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_or(a, b) } @@ -434,6 +466,7 @@ pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_xor(a, b) } @@ -447,6 +480,7 @@ pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_xor(a, b) } @@ -460,6 +494,7 @@ pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_xor(a, b) } @@ -473,6 +508,7 @@ pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_xor(a, b) } @@ -486,6 +522,7 @@ pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_xor(a, b) } @@ -499,6 +536,7 @@ pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_xor(a, b) } @@ -512,6 +550,7 @@ pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_xor(a, b) } @@ -525,6 +564,7 @@ pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_xor(a, b) } @@ -538,6 +578,7 @@ pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_xor(a, b) } @@ -551,6 +592,7 @@ pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_xor(a, b) } @@ -564,6 +606,7 @@ pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_xor(a, b) } @@ -577,6 +620,7 @@ pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_xor(a, b) } @@ -590,6 +634,7 @@ pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_xor(a, b) } @@ -603,6 +648,7 @@ pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> 
int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_xor(a, b) } @@ -616,6 +662,7 @@ pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_xor(a, b) } @@ -629,6 +676,7 @@ pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_xor(a, b) } @@ -642,6 +690,7 @@ pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -661,6 +710,7 @@ vabd_s8_(a, b) 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -680,6 +730,7 @@ vabdq_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -699,6 +750,7 @@ vabd_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -718,6 +770,7 @@ vabdq_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -737,6 +790,7 @@ vabd_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vabd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -756,6 +810,7 @@ vabdq_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -775,6 +830,7 @@ vabd_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -794,6 +850,7 @@ vabdq_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -813,6 +870,7 @@ vabd_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] #[cfg_attr(all(test, target_arch 
= "aarch64"), assert_instr(uabd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -832,6 +890,7 @@ vabdq_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -851,6 +910,7 @@ vabd_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -870,6 +930,7 @@ vabdq_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -889,6 +950,7 @@ vabd_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabd))] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -908,6 +970,7 @@ vabdq_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabdl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { simd_cast(vabd_u8(a, b)) } @@ -921,6 +984,7 @@ pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabdl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { simd_cast(vabd_u16(a, b)) } @@ -934,6 +998,7 @@ pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabdl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { simd_cast(vabd_u32(a, b)) } @@ -947,6 +1012,7 @@ pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s8"))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabdl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { let c: uint8x8_t = simd_cast(vabd_s8(a, b)); simd_cast(c) @@ -961,6 +1027,7 @@ pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabdl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { let c: uint16x4_t = simd_cast(vabd_s16(a, b)); simd_cast(c) @@ -975,6 +1042,7 @@ pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabdl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { let c: uint32x2_t = simd_cast(vabd_s32(a, b)); simd_cast(c) @@ -989,6 +1057,7 @@ pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_eq(a, b) } @@ -1002,6 
+1071,7 @@ pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_eq(a, b) } @@ -1015,6 +1085,7 @@ pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_eq(a, b) } @@ -1028,6 +1099,7 @@ pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_eq(a, b) } @@ -1041,6 +1113,7 @@ pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> 
uint32x2_t { simd_eq(a, b) } @@ -1054,6 +1127,7 @@ pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_eq(a, b) } @@ -1067,6 +1141,7 @@ pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_eq(a, b) } @@ -1080,6 +1155,7 @@ pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_eq(a, b) } @@ -1093,6 +1169,7 @@ pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vceq_s16(a: 
int16x4_t, b: int16x4_t) -> uint16x4_t { simd_eq(a, b) } @@ -1106,6 +1183,7 @@ pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_eq(a, b) } @@ -1119,6 +1197,7 @@ pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_eq(a, b) } @@ -1132,6 +1211,7 @@ pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_eq(a, b) } @@ -1145,6 +1225,7 @@ pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800"))] pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { simd_eq(a, b) } @@ -1158,6 +1239,7 @@ pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { simd_eq(a, b) } @@ -1171,6 +1253,7 @@ pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmeq))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_eq(a, b) } @@ -1184,6 +1267,7 @@ pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmeq))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_eq(a, b) } @@ -1197,6 +1281,7 @@ pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { let c: int8x8_t = simd_and(a, b); let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1212,6 +1297,7 @@ pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { let c: int8x16_t = simd_and(a, b); let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); @@ -1227,6 +1313,7 @@ pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { let c: int16x4_t = simd_and(a, b); let d: i16x4 = i16x4::new(0, 0, 0, 0); @@ -1242,6 +1329,7 @@ pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { let c: int16x8_t = simd_and(a, b); let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1257,6 +1345,7 @@ pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> 
uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { let c: int32x2_t = simd_and(a, b); let d: i32x2 = i32x2::new(0, 0); @@ -1272,6 +1361,7 @@ pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { let c: int32x4_t = simd_and(a, b); let d: i32x4 = i32x4::new(0, 0, 0, 0); @@ -1287,6 +1377,7 @@ pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { let c: poly8x8_t = simd_and(a, b); let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1302,6 +1393,7 @@ pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { let c: poly8x16_t = simd_and(a, b); let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); @@ -1317,6 +1409,7 @@ pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { let c: poly16x4_t = simd_and(a, b); let d: i16x4 = i16x4::new(0, 0, 0, 0); @@ -1332,6 +1425,7 @@ pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { let c: poly16x8_t = simd_and(a, b); let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1347,6 +1441,7 @@ pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let c: uint8x8_t = simd_and(a, b); let d: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1362,6 +1457,7 @@ pub unsafe fn vtst_u8(a: uint8x8_t, 
b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { let c: uint8x16_t = simd_and(a, b); let d: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); @@ -1377,6 +1473,7 @@ pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let c: uint16x4_t = simd_and(a, b); let d: u16x4 = u16x4::new(0, 0, 0, 0); @@ -1392,6 +1489,7 @@ pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let c: uint16x8_t = simd_and(a, b); let d: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1407,6 +1505,7 @@ pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let c: uint32x2_t = simd_and(a, b); let d: u32x2 = u32x2::new(0, 0); @@ -1422,6 +1521,7 @@ pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let c: uint32x4_t = simd_and(a, b); let d: u32x4 = u32x4::new(0, 0, 0, 0); @@ -1437,6 +1537,7 @@ pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { simd_fabs(a) } @@ -1450,6 +1551,7 @@ pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { simd_fabs(a) } @@ -1463,6 +1565,7 @@ pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_gt(a, b) } @@ -1476,6 +1579,7 @@ pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_gt(a, b) } @@ -1489,6 +1593,7 @@ pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_gt(a, b) } @@ -1502,6 +1607,7 @@ pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_gt(a, b) } @@ -1515,6 +1621,7 @@ pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(cmgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_gt(a, b) } @@ -1528,6 +1635,7 @@ pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_gt(a, b) } @@ -1541,6 +1649,7 @@ pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_gt(a, b) } @@ -1554,6 +1663,7 @@ pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_gt(a, b) } @@ -1567,6 +1677,7 @@ pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(cmhi))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_gt(a, b) } @@ -1580,6 +1691,7 @@ pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_gt(a, b) } @@ -1593,6 +1705,7 @@ pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_gt(a, b) } @@ -1606,6 +1719,7 @@ pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_gt(a, b) } @@ -1619,6 +1733,7 @@ pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vcgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_gt(a, b) } @@ -1632,6 +1747,7 @@ pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_gt(a, b) } @@ -1645,6 +1761,7 @@ pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_lt(a, b) } @@ -1658,6 +1775,7 @@ pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_lt(a, b) } @@ -1671,6 +1789,7 @@ pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_lt(a, b) } @@ -1684,6 +1803,7 @@ pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_lt(a, b) } @@ -1697,6 +1817,7 @@ pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_lt(a, b) } @@ -1710,6 +1831,7 @@ pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_lt(a, b) } @@ -1723,6 +1845,7 @@ pub unsafe fn vcltq_s32(a: int32x4_t, 
b: int32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_lt(a, b) } @@ -1736,6 +1859,7 @@ pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_lt(a, b) } @@ -1749,6 +1873,7 @@ pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_lt(a, b) } @@ -1762,6 +1887,7 @@ pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_lt(a, b) } @@ -1775,6 +1901,7 @@ pub 
unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_lt(a, b) } @@ -1788,6 +1915,7 @@ pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_lt(a, b) } @@ -1801,6 +1929,7 @@ pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_lt(a, b) } @@ -1814,6 +1943,7 @@ pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> 
uint32x4_t { simd_lt(a, b) } @@ -1827,6 +1957,7 @@ pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_le(a, b) } @@ -1840,6 +1971,7 @@ pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_le(a, b) } @@ -1853,6 +1985,7 @@ pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_le(a, b) } @@ -1866,6 +1999,7 @@ pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcleq_s16(a: 
int16x8_t, b: int16x8_t) -> uint16x8_t { simd_le(a, b) } @@ -1879,6 +2013,7 @@ pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_le(a, b) } @@ -1892,6 +2027,7 @@ pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_le(a, b) } @@ -1905,6 +2041,7 @@ pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_le(a, b) } @@ -1918,6 +2055,7 @@ pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub 
unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_le(a, b) } @@ -1931,6 +2069,7 @@ pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_le(a, b) } @@ -1944,6 +2083,7 @@ pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_le(a, b) } @@ -1957,6 +2097,7 @@ pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_le(a, b) } @@ -1970,6 +2111,7 @@ pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_le(a, b) } @@ -1983,6 +2125,7 @@ pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_le(a, b) } @@ -1996,6 +2139,7 @@ pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_le(a, b) } @@ -2009,6 +2153,7 @@ pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_ge(a, b) } @@ -2022,6 +2167,7 @@ pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_ge(a, b) } @@ -2035,6 +2181,7 @@ pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_ge(a, b) } @@ -2048,6 +2195,7 @@ pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_ge(a, b) } @@ -2061,6 +2209,7 @@ pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_ge(a, b) } @@ -2074,6 +2223,7 @@ pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_ge(a, b) } @@ -2087,6 +2237,7 @@ pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_ge(a, b) } @@ -2100,6 +2251,7 @@ pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_ge(a, b) } @@ -2113,6 +2265,7 @@ pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_ge(a, b) } @@ -2126,6 +2279,7 @@ pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_ge(a, b) } @@ -2139,6 +2293,7 @@ pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_ge(a, b) } @@ -2152,6 +2307,7 @@ pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_ge(a, b) } @@ -2165,6 +2321,7 @@ pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_ge(a, b) } @@ -2178,6 +2335,7 @@ pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_ge(a, b) } @@ -2191,6 +2349,7 @@ pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2210,6 +2369,7 @@ vcls_s8_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2229,6 +2389,7 @@ vclsq_s8_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2248,6 +2409,7 @@ vcls_s16_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2267,6 +2429,7 @@ vclsq_s16_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2286,6 +2449,7 @@ vcls_s32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2305,6 +2469,7 @@ vclsq_s32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { transmute(vcls_s8(transmute(a))) } @@ -2318,6 +2483,7 @@ pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { transmute(vclsq_s8(transmute(a))) } @@ -2331,6 +2497,7 @@ pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { transmute(vcls_s16(transmute(a))) } @@ -2344,6 +2511,7 @@ pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { transmute(vclsq_s16(transmute(a))) } @@ -2357,6 +2525,7 @@ pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { transmute(vcls_s32(transmute(a))) } @@ -2370,6 +2539,7 @@ pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { transmute(vclsq_s32(transmute(a))) } @@ -2383,6 +2553,7 @@ pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { vclz_s8_(a) } @@ -2396,6 +2567,7 @@ pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { vclzq_s8_(a) } @@ -2409,6 +2581,7 @@ pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { vclz_s16_(a) } @@ -2422,6 +2595,7 @@ pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn 
vclzq_s16(a: int16x8_t) -> int16x8_t { vclzq_s16_(a) } @@ -2435,6 +2609,7 @@ pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { vclz_s32_(a) } @@ -2448,6 +2623,7 @@ pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { vclzq_s32_(a) } @@ -2461,6 +2637,7 @@ pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { transmute(vclz_s8_(transmute(a))) } @@ -2474,6 +2651,7 @@ pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { transmute(vclzq_s8_(transmute(a))) } @@ 
-2487,6 +2665,7 @@ pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { transmute(vclz_s16_(transmute(a))) } @@ -2500,6 +2679,7 @@ pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { transmute(vclzq_s16_(transmute(a))) } @@ -2513,6 +2693,7 @@ pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { transmute(vclz_s32_(transmute(a))) } @@ -2526,6 +2707,7 @@ pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { transmute(vclzq_s32_(transmute(a))) 
} @@ -2539,6 +2721,7 @@ pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2558,6 +2741,7 @@ vcagt_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2577,6 +2761,7 @@ vcagtq_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2596,6 +2781,7 @@ vcage_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { 
#[allow(improper_ctypes)] extern "unadjusted" { @@ -2615,6 +2801,7 @@ vcageq_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { vcagt_f32(b, a) } @@ -2628,6 +2815,7 @@ pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { vcagtq_f32(b, a) } @@ -2641,6 +2829,7 @@ pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { vcage_f32(b, a) } @@ -2654,6 +2843,7 @@ pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe 
fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { vcageq_f32(b, a) } @@ -2667,6 +2857,7 @@ pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { transmute(a) } @@ -2680,6 +2871,7 @@ pub unsafe fn vcreate_s8(a: u64) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { transmute(a) } @@ -2693,6 +2885,7 @@ pub unsafe fn vcreate_s16(a: u64) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { transmute(a) } @@ -2706,6 +2899,7 @@ pub unsafe fn vcreate_s32(a: u64) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcreate_s64(a: u64) -> int64x1_t { transmute(a) } @@ -2719,6 +2913,7 @@ pub unsafe fn vcreate_s64(a: 
u64) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { transmute(a) } @@ -2732,6 +2927,7 @@ pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { transmute(a) } @@ -2745,6 +2941,7 @@ pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { transmute(a) } @@ -2758,6 +2955,7 @@ pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t { transmute(a) } @@ -2771,6 +2969,7 @@ pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { transmute(a) } @@ -2784,6 +2983,7 @@ pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { transmute(a) } @@ -2797,6 +2997,7 @@ pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t { transmute(a) } @@ -2810,6 +3011,7 @@ pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { transmute(a) } @@ -2823,6 +3025,7 @@ pub unsafe fn vcreate_f32(a: u64) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(scvtf))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { simd_cast(a) } @@ -2836,6 +3039,7 @@ pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(scvtf))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { simd_cast(a) } @@ -2849,6 +3053,7 @@ pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ucvtf))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { simd_cast(a) } @@ -2862,6 +3067,7 @@ pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ucvtf))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { simd_cast(a) } @@ -2874,6 +3080,7 @@ pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -2911,6 +3118,7 @@ 
vcvt_n_f32_s32_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -2948,6 +3156,7 @@ vcvtq_n_f32_s32_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -2985,6 +3194,7 @@ vcvt_n_f32_u32_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -3022,6 +3232,7 @@ vcvtq_n_f32_u32_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -3059,6 +3270,7 @@ vcvt_n_s32_f32_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -3096,6 +3308,7 @@ vcvtq_n_s32_f32_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe 
fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -3133,6 +3346,7 @@ vcvt_n_u32_f32_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vcvt, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -3171,6 +3385,7 @@ vcvtq_n_u32_f32_(a, N) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3190,6 +3405,7 @@ vcvt_s32_f32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3209,6 +3425,7 @@ vcvtq_s32_f32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzu))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3228,6 +3445,7 @@ vcvt_u32_f32_(a) #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vcvt))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzu))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -3248,6 +3466,7 @@ vcvtq_u32_f32_(a) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3263,6 +3482,7 @@ pub unsafe fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { static_assert_uimm_bits!(N, 4); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3278,6 +3498,7 @@ pub unsafe fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_lane_s16(a: 
int16x4_t) -> int16x4_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3293,6 +3514,7 @@ pub unsafe fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3308,6 +3530,7 @@ pub unsafe fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) @@ -3323,6 +3546,7 @@ pub unsafe fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3338,6 +3562,7 @@ pub unsafe fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { static_assert_uimm_bits!(N, 4); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3353,6 +3578,7 @@ pub unsafe fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3368,6 +3594,7 @@ pub unsafe fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32]) @@ -3383,6 +3610,7 @@ pub unsafe fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as 
u32, N as u32, N as u32, N as u32]) @@ -3398,6 +3626,7 @@ pub unsafe fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3413,6 +3642,7 @@ pub unsafe fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3428,6 +3658,7 @@ pub unsafe fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3443,6 +3674,7 @@ pub unsafe fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { static_assert_uimm_bits!(N, 4); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3458,6 +3690,7 @@ pub unsafe fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3473,6 +3706,7 @@ pub unsafe fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3488,6 +3722,7 @@ pub unsafe fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as 
u32, N as u32]) @@ -3503,6 +3738,7 @@ pub unsafe fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3518,6 +3754,7 @@ pub unsafe fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { static_assert_uimm_bits!(N, 4); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3533,6 +3770,7 @@ pub unsafe fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3548,6 +3786,7 @@ pub unsafe fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32]) @@ -3563,6 +3802,7 @@ pub unsafe fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3578,6 +3818,7 @@ pub unsafe fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3593,6 +3834,7 @@ pub unsafe fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3608,6 +3850,7 
@@ pub unsafe fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3623,6 +3866,7 @@ pub unsafe fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { static_assert_uimm_bits!(N, 4); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3638,6 +3882,7 @@ pub unsafe fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3653,6 +3898,7 @@ pub unsafe fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3668,6 +3914,7 @@ pub unsafe fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { static_assert_uimm_bits!(N, 4); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3683,6 +3930,7 @@ pub unsafe fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3698,6 +3946,7 @@ pub unsafe fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { static_assert_uimm_bits!(N, 3); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as 
u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3713,6 +3962,7 @@ pub unsafe fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32]) @@ -3728,6 +3978,7 @@ pub unsafe fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) @@ -3743,6 +3994,7 @@ pub unsafe fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 0))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { static_assert!(N == 0); simd_shuffle!(a, a, [N as u32, N as u32]) @@ -3758,6 +4010,7 @@ pub unsafe fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) @@ -3773,6 +4026,7 @@ pub unsafe fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 0))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { static_assert!(N == 0); simd_shuffle!(a, a, [N as u32, N as u32]) @@ -3788,6 +4042,7 @@ pub unsafe fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32]) @@ -3803,6 +4058,7 @@ pub unsafe fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3818,6 +4074,7 @@ pub unsafe fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))] 
#[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { static_assert_uimm_bits!(N, 2); simd_shuffle!(a, a, [N as u32, N as u32]) @@ -3833,6 +4090,7 @@ pub unsafe fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { static_assert_uimm_bits!(N, 1); simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32]) @@ -3848,6 +4106,7 @@ pub unsafe fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 0))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { static_assert!(N == 0); a @@ -3863,6 +4122,7 @@ pub unsafe fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 0))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { static_assert!(N == 0); a @@ -3878,6 +4138,7 @@ pub unsafe fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 
1))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { static_assert_uimm_bits!(N, 1); transmute::(simd_extract(a, N as u32)) @@ -3893,6 +4154,7 @@ pub unsafe fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { static_assert_uimm_bits!(N, 1); transmute::(simd_extract(a, N as u32)) @@ -3908,6 +4170,7 @@ pub unsafe fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert_uimm_bits!(N, 3); match N & 0b111 { @@ -3933,6 +4196,7 @@ pub unsafe fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 15))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert_uimm_bits!(N, 4); match N & 0b1111 { @@ -3966,6 +4230,7 @@ pub unsafe fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(ext, N = 3))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_uimm_bits!(N, 2); match N & 0b11 { @@ -3987,6 +4252,7 @@ pub unsafe fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(N, 3); match N & 0b111 { @@ -4012,6 +4278,7 @@ pub unsafe fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert_uimm_bits!(N, 1); match N & 0b1 { @@ -4031,6 +4298,7 @@ pub unsafe fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert_uimm_bits!(N, 2); match N & 0b11 { @@ -4052,6 +4320,7 @@ pub unsafe fn vextq_s32(a: int32x4_t, b: int32x4_t) -> 
int32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { static_assert_uimm_bits!(N, 3); match N & 0b111 { @@ -4077,6 +4346,7 @@ pub unsafe fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 15))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { static_assert_uimm_bits!(N, 4); match N & 0b1111 { @@ -4110,6 +4380,7 @@ pub unsafe fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert_uimm_bits!(N, 2); match N & 0b11 { @@ -4131,6 +4402,7 @@ pub unsafe fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { static_assert_uimm_bits!(N, 3); match N & 0b111 { @@ -4156,6 +4428,7 @@ pub unsafe 
fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_ #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { static_assert_uimm_bits!(N, 1); match N & 0b1 { @@ -4175,6 +4448,7 @@ pub unsafe fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { static_assert_uimm_bits!(N, 2); match N & 0b11 { @@ -4196,6 +4470,7 @@ pub unsafe fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_ #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { static_assert_uimm_bits!(N, 3); match N & 0b111 { @@ -4221,6 +4496,7 @@ pub unsafe fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 15))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { static_assert_uimm_bits!(N, 4); match N & 
0b1111 { @@ -4254,6 +4530,7 @@ pub unsafe fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { static_assert_uimm_bits!(N, 2); match N & 0b11 { @@ -4275,6 +4552,7 @@ pub unsafe fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { static_assert_uimm_bits!(N, 3); match N & 0b111 { @@ -4300,6 +4578,7 @@ pub unsafe fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_ #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert_uimm_bits!(N, 1); match N & 0b1 { @@ -4319,6 +4598,7 @@ pub unsafe fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> 
uint64x2_t { static_assert_uimm_bits!(N, 1); match N & 0b1 { @@ -4338,6 +4618,7 @@ pub unsafe fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_ #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { static_assert_uimm_bits!(N, 1); match N & 0b1 { @@ -4357,6 +4638,7 @@ pub unsafe fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { static_assert_uimm_bits!(N, 2); match N & 0b11 { @@ -4377,6 +4659,7 @@ pub unsafe fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { simd_add(a, simd_mul(b, c)) } @@ -4390,6 +4673,7 @@ pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { simd_add(a, simd_mul(b, c)) } @@ -4403,6 +4687,7 @@ pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { simd_add(a, simd_mul(b, c)) } @@ -4416,6 +4701,7 @@ pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { simd_add(a, simd_mul(b, c)) } @@ -4429,6 +4715,7 @@ pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { simd_add(a, simd_mul(b, c)) } @@ -4442,6 +4729,7 @@ pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { simd_add(a, simd_mul(b, c)) } @@ -4455,6 +4743,7 @@ pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { simd_add(a, simd_mul(b, c)) } @@ -4468,6 +4757,7 @@ pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { simd_add(a, simd_mul(b, c)) } @@ -4481,6 +4771,7 @@ pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { simd_add(a, simd_mul(b, c)) } @@ 
-4494,6 +4785,7 @@ pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { simd_add(a, simd_mul(b, c)) } @@ -4507,6 +4799,7 @@ pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { simd_add(a, simd_mul(b, c)) } @@ -4520,6 +4813,7 @@ pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { simd_add(a, simd_mul(b, c)) } @@ -4533,6 +4827,7 @@ pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { simd_add(a, simd_mul(b, c)) } @@ -4546,6 +4841,7 @@ pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { simd_add(a, simd_mul(b, c)) } @@ -4559,6 +4855,7 @@ pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { vmla_s16(a, b, vdup_n_s16(c)) } @@ -4572,6 +4869,7 @@ pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { vmlaq_s16(a, b, vdupq_n_s16(c)) } @@ -4585,6 +4883,7 @@ pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { vmla_s32(a, b, vdup_n_s32(c)) } @@ -4598,6 +4897,7 @@ pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { vmlaq_s32(a, b, vdupq_n_s32(c)) } @@ -4611,6 +4911,7 @@ pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { vmla_u16(a, b, vdup_n_u16(c)) } @@ -4624,6 +4925,7 @@ pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> 
uint16x8_t { vmlaq_u16(a, b, vdupq_n_u16(c)) } @@ -4637,6 +4939,7 @@ pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { vmla_u32(a, b, vdup_n_u32(c)) } @@ -4650,6 +4953,7 @@ pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { vmlaq_u32(a, b, vdupq_n_u32(c)) } @@ -4663,6 +4967,7 @@ pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vmla_f32(a, b, vdup_n_f32(c)) } @@ -4676,6 +4981,7 @@ pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vmlaq_f32(a, b, vdupq_n_f32(c)) } @@ -4690,6 +4996,7 @@ pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { static_assert_uimm_bits!(LANE, 2); vmla_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4705,6 +5012,7 @@ pub unsafe fn vmla_lane_s16(a: int16x4_t, b: int16x4_t, c: int1 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { static_assert_uimm_bits!(LANE, 3); vmla_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4720,6 +5028,7 @@ pub unsafe fn vmla_laneq_s16(a: int16x4_t, b: int16x4_t, c: int #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { static_assert_uimm_bits!(LANE, 2); 
vmlaq_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4735,6 +5044,7 @@ pub unsafe fn vmlaq_lane_s16(a: int16x8_t, b: int16x8_t, c: int #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(LANE, 3); vmlaq_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4750,6 +5060,7 @@ pub unsafe fn vmlaq_laneq_s16(a: int16x8_t, b: int16x8_t, c: in #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 1); vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -4765,6 +5076,7 @@ pub unsafe fn vmla_lane_s32(a: int32x2_t, b: int32x2_t, c: int3 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 2); vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -4780,6 +5092,7 @@ pub unsafe fn 
vmla_laneq_s32(a: int32x2_t, b: int32x2_t, c: int #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 1); vmlaq_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4795,6 +5108,7 @@ pub unsafe fn vmlaq_lane_s32(a: int32x4_t, b: int32x4_t, c: int #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); vmlaq_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4810,6 +5124,7 @@ pub unsafe fn vmlaq_laneq_s32(a: int32x4_t, b: int32x4_t, c: in #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { static_assert_uimm_bits!(LANE, 2); vmla_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4825,6 +5140,7 @@ pub unsafe fn vmla_lane_u16(a: uint16x4_t, b: uint16x4_t, c: ui #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t { static_assert_uimm_bits!(LANE, 3); vmla_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4840,6 +5156,7 @@ pub unsafe fn vmla_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { static_assert_uimm_bits!(LANE, 2); vmlaq_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4855,6 +5172,7 @@ pub unsafe fn vmlaq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { static_assert_uimm_bits!(LANE, 3); vmlaq_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4870,6 +5188,7 @@ pub unsafe fn vmlaq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { static_assert_uimm_bits!(LANE, 1); vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -4885,6 +5204,7 @@ pub unsafe fn vmla_lane_u32(a: uint32x2_t, b: uint32x2_t, c: ui #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t { static_assert_uimm_bits!(LANE, 2); vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -4900,6 +5220,7 @@ pub unsafe fn vmla_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 1); vmlaq_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4915,6 +5236,7 @@ pub unsafe fn vmlaq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> 
uint32x4_t { static_assert_uimm_bits!(LANE, 2); vmlaq_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4930,6 +5252,7 @@ pub unsafe fn vmlaq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { static_assert_uimm_bits!(LANE, 1); vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -4945,6 +5268,7 @@ pub unsafe fn vmla_lane_f32(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmla_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { static_assert_uimm_bits!(LANE, 2); vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -4960,6 +5284,7 @@ pub unsafe fn vmla_laneq_f32(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { static_assert_uimm_bits!(LANE, 1); vmlaq_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4975,6 +5300,7 @@ pub unsafe fn vmlaq_lane_f32(a: float32x4_t, b: 
float32x4_t, c: #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlaq_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { static_assert_uimm_bits!(LANE, 2); vmlaq_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4989,6 +5315,7 @@ pub unsafe fn vmlaq_laneq_f32(a: float32x4_t, b: float32x4_t, c #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { simd_add(a, vmull_s8(b, c)) } @@ -5002,6 +5329,7 @@ pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { simd_add(a, vmull_s16(b, c)) } @@ -5015,6 +5343,7 @@ pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { simd_add(a, vmull_s32(b, c)) } @@ -5028,6 +5357,7 @@ pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { simd_add(a, vmull_u8(b, c)) } @@ -5041,6 +5371,7 @@ pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { simd_add(a, vmull_u16(b, c)) } @@ -5054,6 +5385,7 @@ pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { simd_add(a, vmull_u32(b, c)) } @@ -5067,6 +5399,7 @@ pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vmlal.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vmlal_s16(a, b, vdup_n_s16(c)) } @@ -5080,6 +5413,7 @@ pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vmlal_s32(a, b, vdup_n_s32(c)) } @@ -5093,6 +5427,7 @@ pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { vmlal_u16(a, b, vdup_n_u16(c)) } @@ -5106,6 +5441,7 @@ pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) 
-> uint64x2_t { vmlal_u32(a, b, vdup_n_u32(c)) } @@ -5120,6 +5456,7 @@ pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); vmlal_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5135,6 +5472,7 @@ pub unsafe fn vmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 3); vmlal_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5150,6 +5488,7 @@ pub unsafe fn vmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: in #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { static_assert_uimm_bits!(LANE, 1); vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -5165,6 +5504,7 @@ pub unsafe fn vmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(smlal, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { static_assert_uimm_bits!(LANE, 2); vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -5180,6 +5520,7 @@ pub unsafe fn vmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: in #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 2); vmlal_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5195,6 +5536,7 @@ pub unsafe fn vmlal_lane_u16(a: uint32x4_t, b: uint16x4_t, c: u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 3); vmlal_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5210,6 +5552,7 @@ pub unsafe fn vmlal_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { static_assert_uimm_bits!(LANE, 1); vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -5225,6 +5568,7 @@ pub unsafe fn vmlal_lane_u32(a: uint64x2_t, b: uint32x2_t, c: u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlal_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { static_assert_uimm_bits!(LANE, 2); vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -5239,6 +5583,7 @@ pub unsafe fn vmlal_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { simd_sub(a, simd_mul(b, c)) } @@ -5252,6 +5597,7 @@ pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { simd_sub(a, simd_mul(b, c)) } @@ -5265,6 +5611,7 @@ pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> 
int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { simd_sub(a, simd_mul(b, c)) } @@ -5278,6 +5625,7 @@ pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { simd_sub(a, simd_mul(b, c)) } @@ -5291,6 +5639,7 @@ pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { simd_sub(a, simd_mul(b, c)) } @@ -5304,6 +5653,7 @@ pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn 
vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { simd_sub(a, simd_mul(b, c)) } @@ -5317,6 +5667,7 @@ pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { simd_sub(a, simd_mul(b, c)) } @@ -5330,6 +5681,7 @@ pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { simd_sub(a, simd_mul(b, c)) } @@ -5343,6 +5695,7 @@ pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { simd_sub(a, simd_mul(b, c)) } @@ -5356,6 +5709,7 @@ pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { simd_sub(a, simd_mul(b, c)) } @@ -5369,6 +5723,7 @@ pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { simd_sub(a, simd_mul(b, c)) } @@ -5382,6 +5737,7 @@ pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { simd_sub(a, simd_mul(b, c)) } @@ -5395,6 +5751,7 @@ pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { simd_sub(a, simd_mul(b, c)) } @@ -5408,6 +5765,7 @@ pub unsafe 
fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { simd_sub(a, simd_mul(b, c)) } @@ -5421,6 +5779,7 @@ pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { vmls_s16(a, b, vdup_n_s16(c)) } @@ -5434,6 +5793,7 @@ pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { vmlsq_s16(a, b, vdupq_n_s16(c)) } @@ -5447,6 +5807,7 @@ pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { vmls_s32(a, b, vdup_n_s32(c)) } @@ -5460,6 +5821,7 @@ pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { vmlsq_s32(a, b, vdupq_n_s32(c)) } @@ -5473,6 +5835,7 @@ pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { vmls_u16(a, b, vdup_n_u16(c)) } @@ -5486,6 +5849,7 @@ pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { vmlsq_u16(a, b, vdupq_n_u16(c)) } @@ -5499,6 +5863,7 @@ pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = 
"aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { vmls_u32(a, b, vdup_n_u32(c)) } @@ -5512,6 +5877,7 @@ pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { vmlsq_u32(a, b, vdupq_n_u32(c)) } @@ -5525,6 +5891,7 @@ pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vmls_f32(a, b, vdup_n_f32(c)) } @@ -5538,6 +5905,7 @@ pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vmlsq_f32(a, b, vdupq_n_f32(c)) } @@ 
-5552,6 +5920,7 @@ pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { static_assert_uimm_bits!(LANE, 2); vmls_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5567,6 +5936,7 @@ pub unsafe fn vmls_lane_s16(a: int16x4_t, b: int16x4_t, c: int1 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { static_assert_uimm_bits!(LANE, 3); vmls_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5582,6 +5952,7 @@ pub unsafe fn vmls_laneq_s16(a: int16x4_t, b: int16x4_t, c: int #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { static_assert_uimm_bits!(LANE, 2); vmlsq_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5597,6 +5968,7 @@ pub unsafe fn vmlsq_lane_s16(a: int16x8_t, b: int16x8_t, c: int #[cfg_attr(all(test, target_arch = 
"aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(LANE, 3); vmlsq_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5612,6 +5984,7 @@ pub unsafe fn vmlsq_laneq_s16(a: int16x8_t, b: int16x8_t, c: in #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 1); vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -5627,6 +6000,7 @@ pub unsafe fn vmls_lane_s32(a: int32x2_t, b: int32x2_t, c: int3 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 2); vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -5642,6 +6016,7 @@ pub unsafe fn vmls_laneq_s32(a: int32x2_t, b: int32x2_t, c: int #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 1); vmlsq_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5657,6 +6032,7 @@ pub unsafe fn vmlsq_lane_s32(a: int32x4_t, b: int32x4_t, c: int #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); vmlsq_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5672,6 +6048,7 @@ pub unsafe fn vmlsq_laneq_s32(a: int32x4_t, b: int32x4_t, c: in #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { static_assert_uimm_bits!(LANE, 2); vmls_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5687,6 +6064,7 @@ pub unsafe fn vmls_lane_u16(a: uint16x4_t, b: uint16x4_t, c: ui #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> 
uint16x4_t { static_assert_uimm_bits!(LANE, 3); vmls_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5702,6 +6080,7 @@ pub unsafe fn vmls_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { static_assert_uimm_bits!(LANE, 2); vmlsq_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5717,6 +6096,7 @@ pub unsafe fn vmlsq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { static_assert_uimm_bits!(LANE, 3); vmlsq_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5732,6 +6112,7 @@ pub unsafe fn vmlsq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { static_assert_uimm_bits!(LANE, 1); vmls_u32(a, b, 
simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -5747,6 +6128,7 @@ pub unsafe fn vmls_lane_u32(a: uint32x2_t, b: uint32x2_t, c: ui #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t { static_assert_uimm_bits!(LANE, 2); vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -5762,6 +6144,7 @@ pub unsafe fn vmls_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 1); vmlsq_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5777,6 +6160,7 @@ pub unsafe fn vmlsq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 2); vmlsq_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5792,6 +6176,7 @@ pub unsafe fn vmlsq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_lane_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { static_assert_uimm_bits!(LANE, 1); vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -5807,6 +6192,7 @@ pub unsafe fn vmls_lane_f32(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmls_laneq_f32(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { static_assert_uimm_bits!(LANE, 2); vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -5822,6 +6208,7 @@ pub unsafe fn vmls_laneq_f32(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_lane_f32(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { static_assert_uimm_bits!(LANE, 1); vmlsq_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5837,6 +6224,7 @@ pub unsafe fn vmlsq_lane_f32(a: float32x4_t, b: float32x4_t, c: #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsq_laneq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { static_assert_uimm_bits!(LANE, 2); vmlsq_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5851,6 +6239,7 @@ pub unsafe fn vmlsq_laneq_f32(a: float32x4_t, b: float32x4_t, c #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { simd_sub(a, vmull_s8(b, c)) } @@ -5864,6 +6253,7 @@ pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { simd_sub(a, vmull_s16(b, c)) } @@ -5877,6 +6267,7 @@ pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { simd_sub(a, vmull_s32(b, c)) } @@ -5890,6 +6281,7 @@ pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> 
int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { simd_sub(a, vmull_u8(b, c)) } @@ -5903,6 +6295,7 @@ pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { simd_sub(a, vmull_u16(b, c)) } @@ -5916,6 +6309,7 @@ pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { simd_sub(a, vmull_u32(b, c)) } @@ -5929,6 +6323,7 @@ pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800"))] pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vmlsl_s16(a, b, vdup_n_s16(c)) } @@ -5942,6 +6337,7 @@ pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vmlsl_s32(a, b, vdup_n_s32(c)) } @@ -5955,6 +6351,7 @@ pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { vmlsl_u16(a, b, vdup_n_u16(c)) } @@ -5968,6 +6365,7 @@ pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { vmlsl_u32(a, b, vdup_n_u32(c)) } @@ -5982,6 +6380,7 @@ pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); vmlsl_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5997,6 +6396,7 @@ pub unsafe fn vmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 3); vmlsl_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -6012,6 +6412,7 @@ pub unsafe fn vmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: in #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { static_assert_uimm_bits!(LANE, 1); vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -6027,6 +6428,7 @@ pub unsafe fn vmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub 
unsafe fn vmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { static_assert_uimm_bits!(LANE, 2); vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -6042,6 +6444,7 @@ pub unsafe fn vmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: in #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsl_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 2); vmlsl_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -6057,6 +6460,7 @@ pub unsafe fn vmlsl_lane_u16(a: uint32x4_t, b: uint16x4_t, c: u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsl_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 3); vmlsl_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -6072,6 +6476,7 @@ pub unsafe fn vmlsl_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsl_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { static_assert_uimm_bits!(LANE, 1); vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ 
-6087,6 +6492,7 @@ pub unsafe fn vmlsl_lane_u32(a: uint64x2_t, b: uint32x2_t, c: u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmlsl_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { static_assert_uimm_bits!(LANE, 2); vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32])) @@ -6101,6 +6507,7 @@ pub unsafe fn vmlsl_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { simd_neg(a) } @@ -6114,6 +6521,7 @@ pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { simd_neg(a) } @@ -6127,6 +6535,7 @@ pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { 
simd_neg(a) } @@ -6140,6 +6549,7 @@ pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { simd_neg(a) } @@ -6153,6 +6563,7 @@ pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { simd_neg(a) } @@ -6166,6 +6577,7 @@ pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { simd_neg(a) } @@ -6179,6 +6591,7 @@ pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fneg))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { simd_neg(a) } @@ -6192,6 +6605,7 @@ pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fneg))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { simd_neg(a) } @@ -6205,6 +6619,7 @@ pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6224,6 +6639,7 @@ vqneg_s8_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6243,6 +6659,7 @@ vqnegq_s8_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6262,6 +6679,7 @@ vqneg_s16_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(sqneg))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6281,6 +6699,7 @@ vqnegq_s16_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6300,6 +6719,7 @@ vqneg_s32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6319,6 +6739,7 @@ vqnegq_s32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6338,6 +6759,7 @@ vqsub_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] #[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6357,6 +6779,7 @@ vqsubq_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6376,6 +6799,7 @@ vqsub_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6395,6 +6819,7 @@ vqsubq_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6414,6 +6839,7 @@ vqsub_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6433,6 +6859,7 @@ vqsubq_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6452,6 +6879,7 @@ vqsub_u64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6471,6 +6899,7 @@ vqsubq_u64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6490,6 +6919,7 @@ vqsub_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6509,6 +6939,7 @@ vqsubq_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6528,6 +6959,7 @@ vqsub_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6547,6 +6979,7 @@ vqsubq_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6566,6 +6999,7 @@ vqsub_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = 
"arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6585,6 +7019,7 @@ vqsubq_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6604,6 +7039,7 @@ vqsub_s64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6623,6 +7059,7 @@ vqsubq_s64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6642,6 +7079,7 @@ vhadd_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6661,6 +7099,7 @@ vhaddq_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6680,6 +7119,7 @@ vhadd_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6699,6 +7139,7 @@ vhaddq_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6718,6 +7159,7 @@ vhadd_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6737,6 +7179,7 @@ vhaddq_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6756,6 +7199,7 @@ vhadd_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6775,6 +7219,7 @@ vhaddq_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6794,6 +7239,7 @@ vhadd_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800"))] pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6813,6 +7259,7 @@ vhaddq_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6832,6 +7279,7 @@ vhadd_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6851,6 +7299,7 @@ vhaddq_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6870,6 +7319,7 @@ vrhadd_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn 
vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6889,6 +7339,7 @@ vrhaddq_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6908,6 +7359,7 @@ vrhadd_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6927,6 +7379,7 @@ vrhaddq_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6946,6 +7399,7 @@ vrhadd_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn 
vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6965,6 +7419,7 @@ vrhaddq_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6984,6 +7439,7 @@ vrhadd_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7003,6 +7459,7 @@ vrhaddq_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7022,6 +7479,7 @@ vrhadd_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrhaddq_s16(a: int16x8_t, 
b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7041,6 +7499,7 @@ vrhaddq_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7060,6 +7519,7 @@ vrhadd_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7079,6 +7539,7 @@ vrhaddq_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frintn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7098,6 +7559,7 @@ vrndn_f32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frintn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] 
extern "unadjusted" { @@ -7117,6 +7579,7 @@ vrndnq_f32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7136,6 +7599,7 @@ vqadd_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7155,6 +7619,7 @@ vqaddq_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7174,6 +7639,7 @@ vqadd_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7193,6 
+7659,7 @@ vqaddq_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7212,6 +7679,7 @@ vqadd_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7231,6 +7699,7 @@ vqaddq_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7250,6 +7719,7 @@ vqadd_u64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7269,6 +7739,7 @@ 
vqaddq_u64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7288,6 +7759,7 @@ vqadd_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7307,6 +7779,7 @@ vqaddq_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7326,6 +7799,7 @@ vqadd_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7345,6 +7819,7 @@ vqaddq_s16_(a, b) #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vqadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7364,6 +7839,7 @@ vqadd_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7383,6 +7859,7 @@ vqaddq_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7402,6 +7879,7 @@ vqadd_s64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7421,6 +7899,7 @@ vqaddq_s64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7440,6 +7919,7 @@ vld1_s8_x2_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7459,6 +7939,7 @@ vld1_s16_x2_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7478,6 +7959,7 @@ vld1_s32_x2_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7497,6 +7979,7 @@ vld1_s64_x2_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7516,6 +7999,7 @@ vld1q_s8_x2_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7535,6 +8019,7 @@ vld1q_s16_x2_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7554,6 +8039,7 @@ vld1q_s32_x2_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7573,6 +8059,7 @@ vld1q_s64_x2_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7592,6 +8079,7 @@ vld1_s8_x3_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7611,6 +8099,7 @@ vld1_s16_x3_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7630,6 +8119,7 @@ vld1_s32_x3_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7649,6 +8139,7 @@ vld1_s64_x3_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_s8_x3(a: *const i8) -> 
int8x16x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7668,6 +8159,7 @@ vld1q_s8_x3_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7687,6 +8179,7 @@ vld1q_s16_x3_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7706,6 +8199,7 @@ vld1q_s32_x3_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7725,6 +8219,7 @@ vld1q_s64_x3_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7744,6 +8239,7 @@ vld1_s8_x4_(a) 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7763,6 +8259,7 @@ vld1_s16_x4_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7782,6 +8279,7 @@ vld1_s32_x4_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7801,6 +8299,7 @@ vld1_s64_x4_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7820,6 +8319,7 @@ vld1q_s8_x4_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7839,6 +8339,7 @@ vld1q_s16_x4_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7858,6 +8359,7 @@ vld1q_s32_x4_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7877,6 +8379,7 @@ vld1q_s64_x4_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { transmute(vld1_s8_x2(transmute(a))) } @@ -7890,6 +8393,7 @@ pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { transmute(vld1_s16_x2(transmute(a))) } @@ -7903,6 +8407,7 @@ pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { transmute(vld1_s32_x2(transmute(a))) } @@ -7916,6 +8421,7 @@ pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { transmute(vld1_s64_x2(transmute(a))) } @@ -7929,6 +8435,7 @@ pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { transmute(vld1q_s8_x2(transmute(a))) } @@ -7942,6 +8449,7 @@ pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { transmute(vld1q_s16_x2(transmute(a))) } @@ -7955,6 +8463,7 @@ pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { transmute(vld1q_s32_x2(transmute(a))) } @@ -7968,6 +8477,7 @@ pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { transmute(vld1q_s64_x2(transmute(a))) } @@ -7981,6 +8491,7 @@ pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { transmute(vld1_s8_x3(transmute(a))) } @@ -7994,6 +8505,7 @@ pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { transmute(vld1_s16_x3(transmute(a))) } @@ -8007,6 +8519,7 @@ pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { transmute(vld1_s32_x3(transmute(a))) } @@ -8020,6 +8533,7 @@ pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { transmute(vld1_s64_x3(transmute(a))) } @@ -8033,6 +8547,7 @@ pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { transmute(vld1q_s8_x3(transmute(a))) } @@ -8046,6 +8561,7 @@ pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = 
"aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { transmute(vld1q_s16_x3(transmute(a))) } @@ -8059,6 +8575,7 @@ pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { transmute(vld1q_s32_x3(transmute(a))) } @@ -8072,6 +8589,7 @@ pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { transmute(vld1q_s64_x3(transmute(a))) } @@ -8085,6 +8603,7 @@ pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { transmute(vld1_s8_x4(transmute(a))) } @@ -8098,6 +8617,7 @@ pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { transmute(vld1_s16_x4(transmute(a))) } @@ -8111,6 +8631,7 @@ pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { transmute(vld1_s32_x4(transmute(a))) } @@ -8124,6 +8645,7 @@ pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { transmute(vld1_s64_x4(transmute(a))) } @@ -8137,6 +8659,7 @@ pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { transmute(vld1q_s8_x4(transmute(a))) } @@ -8150,6 +8673,7 @@ pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { transmute(vld1q_s16_x4(transmute(a))) } @@ -8163,6 +8687,7 @@ pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { transmute(vld1q_s32_x4(transmute(a))) } @@ -8176,6 +8701,7 @@ pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { transmute(vld1q_s64_x4(transmute(a))) } @@ -8189,6 +8715,7 @@ pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { transmute(vld1_s8_x2(transmute(a))) } @@ -8202,6 +8729,7 @@ pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { transmute(vld1_s8_x3(transmute(a))) } @@ -8215,6 +8743,7 @@ pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { transmute(vld1_s8_x4(transmute(a))) } @@ -8228,6 +8757,7 @@ pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { transmute(vld1q_s8_x2(transmute(a))) } @@ -8241,6 +8771,7 @@ pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { transmute(vld1q_s8_x3(transmute(a))) } @@ -8254,6 +8785,7 @@ pub unsafe fn vld1q_p8_x3(a: *const p8) -> 
poly8x16x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { transmute(vld1q_s8_x4(transmute(a))) } @@ -8267,6 +8799,7 @@ pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { transmute(vld1_s16_x2(transmute(a))) } @@ -8280,6 +8813,7 @@ pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { transmute(vld1_s16_x3(transmute(a))) } @@ -8293,6 +8827,7 @@ pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { transmute(vld1_s16_x4(transmute(a))) } @@ -8306,6 +8841,7 @@ pub unsafe fn 
vld1_p16_x4(a: *const p16) -> poly16x4x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { transmute(vld1q_s16_x2(transmute(a))) } @@ -8319,6 +8855,7 @@ pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { transmute(vld1q_s16_x3(transmute(a))) } @@ -8332,6 +8869,7 @@ pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { transmute(vld1q_s16_x4(transmute(a))) } @@ -8345,6 +8883,7 @@ pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { transmute(vld1_s64_x2(transmute(a))) } @@ 
-8358,6 +8897,7 @@ pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { transmute(vld1_s64_x3(transmute(a))) } @@ -8371,6 +8911,7 @@ pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t { transmute(vld1_s64_x4(transmute(a))) } @@ -8384,6 +8925,7 @@ pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { transmute(vld1q_s64_x2(transmute(a))) } @@ -8397,6 +8939,7 @@ pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { 
transmute(vld1q_s64_x3(transmute(a))) } @@ -8410,6 +8953,7 @@ pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { transmute(vld1q_s64_x4(transmute(a))) } @@ -8423,6 +8967,7 @@ pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8442,6 +8987,7 @@ vld1_f32_x2_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8461,6 +9007,7 @@ vld1q_f32_x2_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { #[allow(improper_ctypes)] extern 
"unadjusted" { @@ -8480,6 +9027,7 @@ vld1_f32_x3_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8499,6 +9047,7 @@ vld1q_f32_x3_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8518,6 +9067,7 @@ vld1_f32_x4_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8535,6 +9085,7 @@ vld1q_f32_x4_(a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8568,6 +9119,7 @@ vld2_s8_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_s16(a: *const 
i16) -> int16x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8601,6 +9153,7 @@ vld2_s16_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8634,6 +9187,7 @@ vld2_s32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8667,6 +9221,7 @@ vld2q_s8_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8700,6 +9255,7 @@ vld2q_s16_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8733,6 +9289,7 @@ vld2q_s32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(nop))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8768,6 +9325,7 @@ vld2_s64_(a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { transmute(vld2_s8(transmute(a))) } @@ -8781,6 +9339,7 @@ pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { transmute(vld2_s16(transmute(a))) } @@ -8794,6 +9353,7 @@ pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { transmute(vld2_s32(transmute(a))) } @@ -8807,6 +9367,7 @@ pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { transmute(vld2q_s8(transmute(a))) } @@ -8820,6 +9381,7 @@ pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { transmute(vld2q_s16(transmute(a))) } @@ -8833,6 +9395,7 @@ pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { transmute(vld2q_s32(transmute(a))) } @@ -8846,6 +9409,7 @@ pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { transmute(vld2_s8(transmute(a))) } @@ -8859,6 +9423,7 @@ pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { transmute(vld2_s16(transmute(a))) } @@ -8872,6 +9437,7 @@ pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { transmute(vld2q_s8(transmute(a))) } @@ -8885,6 +9451,7 @@ pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { transmute(vld2q_s16(transmute(a))) } @@ -8898,6 +9465,7 @@ pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t { transmute(vld2_s64(transmute(a))) } @@ -8911,6 +9479,7 @@ pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t { transmute(vld2_s64(transmute(a))) } @@ -8922,6 +9491,7 @@ pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ 
-8955,6 +9525,7 @@ vld2_f32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -8988,6 +9559,7 @@ vld2q_f32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9021,6 +9593,7 @@ vld2_dup_s8_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9054,6 +9627,7 @@ vld2_dup_s16_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9087,6 +9661,7 @@ vld2_dup_s32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9120,6 +9695,7 @@ vld2q_dup_s8_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9153,6 +9729,7 @@ vld2q_dup_s16_(a as _) 
#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9186,6 +9763,7 @@ vld2q_dup_s32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(nop))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9221,6 +9799,7 @@ vld2_dup_s64_(a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { transmute(vld2_dup_s8(transmute(a))) } @@ -9234,6 +9813,7 @@ pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { transmute(vld2_dup_s16(transmute(a))) } @@ -9247,6 +9827,7 @@ pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub 
unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { transmute(vld2_dup_s32(transmute(a))) } @@ -9260,6 +9841,7 @@ pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { transmute(vld2q_dup_s8(transmute(a))) } @@ -9273,6 +9855,7 @@ pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { transmute(vld2q_dup_s16(transmute(a))) } @@ -9286,6 +9869,7 @@ pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { transmute(vld2q_dup_s32(transmute(a))) } @@ -9299,6 +9883,7 @@ pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { transmute(vld2_dup_s8(transmute(a))) } @@ -9312,6 +9897,7 @@ pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { transmute(vld2_dup_s16(transmute(a))) } @@ -9325,6 +9911,7 @@ pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { transmute(vld2q_dup_s8(transmute(a))) } @@ -9338,6 +9925,7 @@ pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { transmute(vld2q_dup_s16(transmute(a))) } @@ -9351,6 +9939,7 @@ pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t { transmute(vld2_dup_s64(transmute(a))) } @@ -9364,6 +9953,7 @@ pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t { transmute(vld2_dup_s64(transmute(a))) } @@ -9375,6 +9965,7 @@ pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9408,6 +9999,7 @@ vld2_dup_f32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9442,6 +10034,7 @@ vld2q_dup_f32_(a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t { static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] @@ -9479,6 +10072,7 @@ vld2_lane_s8_(b.0, b.1, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -9516,6 +10110,7 @@ vld2_lane_s16_(b.0, b.1, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t { static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] @@ -9553,6 +10148,7 @@ vld2_lane_s32_(b.0, b.1, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t { static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] @@ -9590,6 +10186,7 @@ vld2q_lane_s16_(b.0, b.1, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -9629,6 +10226,7 @@ vld2q_lane_s32_(b.0, b.1, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_lane_u8(a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t { static_assert_uimm_bits!(LANE, 3); transmute(vld2_lane_s8::(transmute(a), transmute(b))) @@ -9644,6 +10242,7 @@ pub unsafe fn vld2_lane_u8(a: *const u8, b: 
uint8x8x2_t) -> uin #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> uint16x4x2_t { static_assert_uimm_bits!(LANE, 2); transmute(vld2_lane_s16::(transmute(a), transmute(b))) @@ -9659,6 +10258,7 @@ pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> uint32x2x2_t { static_assert_uimm_bits!(LANE, 1); transmute(vld2_lane_s32::(transmute(a), transmute(b))) @@ -9674,6 +10274,7 @@ pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> uint16x8x2_t { static_assert_uimm_bits!(LANE, 3); transmute(vld2q_lane_s16::(transmute(a), transmute(b))) @@ -9689,6 +10290,7 @@ pub unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue 
= "111800"))] pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> uint32x4x2_t { static_assert_uimm_bits!(LANE, 2); transmute(vld2q_lane_s32::(transmute(a), transmute(b))) @@ -9704,6 +10306,7 @@ pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t { static_assert_uimm_bits!(LANE, 3); transmute(vld2_lane_s8::(transmute(a), transmute(b))) @@ -9719,6 +10322,7 @@ pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> pol #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> poly16x4x2_t { static_assert_uimm_bits!(LANE, 2); transmute(vld2_lane_s16::(transmute(a), transmute(b))) @@ -9734,6 +10338,7 @@ pub unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> poly16x8x2_t { static_assert_uimm_bits!(LANE, 3); transmute(vld2q_lane_s16::(transmute(a), transmute(b))) @@ -9747,6 +10352,7 @@ pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> #[target_feature(enable = "neon,v7")] 
#[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t { static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] @@ -9784,6 +10390,7 @@ vld2_lane_f32_(b.0, b.1, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) -> float32x4x2_t { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -9820,6 +10427,7 @@ vld2q_lane_f32_(b.0, b.1, LANE as i64, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9853,6 +10461,7 @@ vld3_s8_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9886,6 +10495,7 @@ vld3_s16_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9919,6 +10529,7 @@ vld3_s32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { #[allow(improper_ctypes)] extern "unadjusted" 
{ @@ -9952,6 +10563,7 @@ vld3q_s8_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -9985,6 +10597,7 @@ vld3q_s16_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10018,6 +10631,7 @@ vld3q_s32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(nop))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10053,6 +10667,7 @@ vld3_s64_(a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { transmute(vld3_s8(transmute(a))) } @@ -10066,6 +10681,7 @@ pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { transmute(vld3_s16(transmute(a))) } @@ -10079,6 +10695,7 @@ pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { transmute(vld3_s32(transmute(a))) } @@ -10092,6 +10709,7 @@ pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { transmute(vld3q_s8(transmute(a))) } @@ -10105,6 +10723,7 @@ pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { transmute(vld3q_s16(transmute(a))) } @@ -10118,6 +10737,7 @@ pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { transmute(vld3q_s32(transmute(a))) } @@ -10131,6 +10751,7 @@ pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { transmute(vld3_s8(transmute(a))) } @@ -10144,6 +10765,7 @@ pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { transmute(vld3_s16(transmute(a))) } @@ -10157,6 +10779,7 @@ pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { transmute(vld3q_s8(transmute(a))) } @@ -10170,6 +10793,7 @@ pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { transmute(vld3q_s16(transmute(a))) } @@ -10183,6 +10807,7 @@ pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { transmute(vld3_s64(transmute(a))) } @@ -10196,6 +10821,7 @@ pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { transmute(vld3_s64(transmute(a))) } @@ -10207,6 +10833,7 @@ pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10240,6 +10867,7 @@ vld3_f32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10273,6 +10901,7 @@ vld3q_f32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10306,6 +10935,7 @@ vld3_dup_s8_(a as _) #[cfg(target_arch = "arm")] 
#[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10339,6 +10969,7 @@ vld3_dup_s16_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10372,6 +11003,7 @@ vld3_dup_s32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10405,6 +11037,7 @@ vld3q_dup_s8_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10438,6 +11071,7 @@ vld3q_dup_s16_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10471,6 +11105,7 @@ vld3q_dup_s32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(nop))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10506,6 +11141,7 @@ vld3_dup_s64_(a as _) #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { transmute(vld3_dup_s8(transmute(a))) } @@ -10519,6 +11155,7 @@ pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { transmute(vld3_dup_s16(transmute(a))) } @@ -10532,6 +11169,7 @@ pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { transmute(vld3_dup_s32(transmute(a))) } @@ -10545,6 +11183,7 @@ pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { transmute(vld3q_dup_s8(transmute(a))) } @@ -10558,6 +11197,7 @@ pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { transmute(vld3q_dup_s16(transmute(a))) } @@ -10571,6 +11211,7 @@ pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { transmute(vld3q_dup_s32(transmute(a))) } @@ -10584,6 +11225,7 @@ pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { transmute(vld3_dup_s8(transmute(a))) } @@ -10597,6 +11239,7 @@ pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { transmute(vld3_dup_s16(transmute(a))) } @@ -10610,6 +11253,7 @@ pub unsafe 
fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { transmute(vld3q_dup_s8(transmute(a))) } @@ -10623,6 +11267,7 @@ pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { transmute(vld3q_dup_s16(transmute(a))) } @@ -10636,6 +11281,7 @@ pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t { transmute(vld3_dup_s64(transmute(a))) } @@ -10649,6 +11295,7 @@ pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t { 
transmute(vld3_dup_s64(transmute(a))) } @@ -10660,6 +11307,7 @@ pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10693,6 +11341,7 @@ vld3_dup_f32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -10727,6 +11376,7 @@ vld3q_dup_f32_(a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t { static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] @@ -10764,6 +11414,7 @@ vld3_lane_s8_(b.0, b.1, b.2, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -10801,6 +11452,7 @@ vld3_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t { static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] @@ -10838,6 +11490,7 @@ vld3_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _) 
#[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t { static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] @@ -10875,6 +11528,7 @@ vld3q_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -10914,6 +11568,7 @@ vld3q_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t { static_assert_uimm_bits!(LANE, 3); transmute(vld3_lane_s8::(transmute(a), transmute(b))) @@ -10929,6 +11584,7 @@ pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uin #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t { static_assert_uimm_bits!(LANE, 2); transmute(vld3_lane_s16::(transmute(a), transmute(b))) @@ -10944,6 +11600,7 @@ pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, 
LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t { static_assert_uimm_bits!(LANE, 1); transmute(vld3_lane_s32::(transmute(a), transmute(b))) @@ -10959,6 +11616,7 @@ pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t { static_assert_uimm_bits!(LANE, 3); transmute(vld3q_lane_s16::(transmute(a), transmute(b))) @@ -10974,6 +11632,7 @@ pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t { static_assert_uimm_bits!(LANE, 2); transmute(vld3q_lane_s32::(transmute(a), transmute(b))) @@ -10989,6 +11648,7 @@ pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> 
poly8x8x3_t { static_assert_uimm_bits!(LANE, 3); transmute(vld3_lane_s8::(transmute(a), transmute(b))) @@ -11004,6 +11664,7 @@ pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> pol #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t { static_assert_uimm_bits!(LANE, 2); transmute(vld3_lane_s16::(transmute(a), transmute(b))) @@ -11019,6 +11680,7 @@ pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t { static_assert_uimm_bits!(LANE, 3); transmute(vld3q_lane_s16::(transmute(a), transmute(b))) @@ -11032,6 +11694,7 @@ pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t { static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] @@ -11069,6 +11732,7 @@ vld3_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) -> float32x4x3_t { 
static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -11105,6 +11769,7 @@ vld3q_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11138,6 +11803,7 @@ vld4_s8_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11171,6 +11837,7 @@ vld4_s16_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11204,6 +11871,7 @@ vld4_s32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11237,6 +11905,7 @@ vld4q_s8_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11270,6 +11939,7 @@ vld4q_s16_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { #[allow(improper_ctypes)] 
extern "unadjusted" { @@ -11303,6 +11973,7 @@ vld4q_s32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(nop))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11338,6 +12009,7 @@ vld4_s64_(a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { transmute(vld4_s8(transmute(a))) } @@ -11351,6 +12023,7 @@ pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { transmute(vld4_s16(transmute(a))) } @@ -11364,6 +12037,7 @@ pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { transmute(vld4_s32(transmute(a))) } @@ -11377,6 +12051,7 @@ pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(ld4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { transmute(vld4q_s8(transmute(a))) } @@ -11390,6 +12065,7 @@ pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { transmute(vld4q_s16(transmute(a))) } @@ -11403,6 +12079,7 @@ pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { transmute(vld4q_s32(transmute(a))) } @@ -11416,6 +12093,7 @@ pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { transmute(vld4_s8(transmute(a))) } @@ -11429,6 +12107,7 @@ pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(ld4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { transmute(vld4_s16(transmute(a))) } @@ -11442,6 +12121,7 @@ pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { transmute(vld4q_s8(transmute(a))) } @@ -11455,6 +12135,7 @@ pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { transmute(vld4q_s16(transmute(a))) } @@ -11468,6 +12149,7 @@ pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { transmute(vld4_s64(transmute(a))) } @@ -11481,6 +12163,7 @@ pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { transmute(vld4_s64(transmute(a))) } @@ -11492,6 +12175,7 @@ pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11525,6 +12209,7 @@ vld4_f32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11558,6 +12243,7 @@ vld4q_f32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11591,6 +12277,7 @@ vld4_dup_s8_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11624,6 +12311,7 @@ vld4_dup_s16_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11657,6 +12345,7 @@ 
vld4_dup_s32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11690,6 +12379,7 @@ vld4q_dup_s8_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11723,6 +12413,7 @@ vld4q_dup_s16_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11756,6 +12447,7 @@ vld4q_dup_s32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(nop))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11791,6 +12483,7 @@ vld4_dup_s64_(a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { transmute(vld4_dup_s8(transmute(a))) } @@ -11804,6 +12497,7 @@ pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { transmute(vld4_dup_s16(transmute(a))) } @@ -11817,6 +12511,7 @@ pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { transmute(vld4_dup_s32(transmute(a))) } @@ -11830,6 +12525,7 @@ pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { transmute(vld4q_dup_s8(transmute(a))) } @@ -11843,6 +12539,7 @@ pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { transmute(vld4q_dup_s16(transmute(a))) } @@ -11856,6 +12553,7 @@ pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { transmute(vld4q_dup_s32(transmute(a))) } @@ -11869,6 +12567,7 @@ pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { transmute(vld4_dup_s8(transmute(a))) } @@ -11882,6 +12581,7 @@ pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { transmute(vld4_dup_s16(transmute(a))) } @@ -11895,6 +12595,7 @@ pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { transmute(vld4q_dup_s8(transmute(a))) } @@ -11908,6 +12609,7 @@ pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vld4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { transmute(vld4q_dup_s16(transmute(a))) } @@ -11921,6 +12623,7 @@ pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { transmute(vld4_dup_s64(transmute(a))) } @@ -11934,6 +12637,7 @@ pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t { transmute(vld4_dup_s64(transmute(a))) } @@ -11945,6 +12649,7 @@ pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -11978,6 +12683,7 @@ vld4_dup_f32_(a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] +#[unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -12012,6 +12718,7 @@ vld4q_dup_f32_(a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t { static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] @@ -12049,6 +12756,7 @@ vld4_lane_s8_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -12086,6 +12794,7 @@ vld4_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t { static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] @@ -12123,6 +12832,7 @@ vld4_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t { static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] @@ -12160,6 +12870,7 @@ vld4q_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -12199,6 +12910,7 @@ vld4q_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t { static_assert_uimm_bits!(LANE, 3); transmute(vld4_lane_s8::(transmute(a), transmute(b))) @@ -12214,6 +12926,7 @@ pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uin #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t { static_assert_uimm_bits!(LANE, 2); transmute(vld4_lane_s16::(transmute(a), transmute(b))) @@ -12229,6 +12942,7 @@ pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t { static_assert_uimm_bits!(LANE, 1); transmute(vld4_lane_s32::(transmute(a), transmute(b))) @@ -12244,6 +12958,7 @@ pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t { static_assert_uimm_bits!(LANE, 3); transmute(vld4q_lane_s16::(transmute(a), transmute(b))) @@ -12259,6 +12974,7 @@ pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t { static_assert_uimm_bits!(LANE, 2); transmute(vld4q_lane_s32::(transmute(a), transmute(b))) @@ -12274,6 +12990,7 @@ pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t { static_assert_uimm_bits!(LANE, 3); transmute(vld4_lane_s8::(transmute(a), transmute(b))) @@ -12289,6 +13006,7 @@ pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> pol #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4_lane_p16(a: *const p16, b: 
poly16x4x4_t) -> poly16x4x4_t { static_assert_uimm_bits!(LANE, 2); transmute(vld4_lane_s16::(transmute(a), transmute(b))) @@ -12304,6 +13022,7 @@ pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t { static_assert_uimm_bits!(LANE, 3); transmute(vld4q_lane_s16::(transmute(a), transmute(b))) @@ -12317,6 +13036,7 @@ pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t { static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] @@ -12354,6 +13074,7 @@ vld4_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) -> float32x4x4_t { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -12393,6 +13114,7 @@ vld4q_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { static_assert_uimm_bits!(LANE, 3); 
*a = simd_extract(b, LANE as u32); @@ -12408,6 +13130,7 @@ pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { static_assert_uimm_bits!(LANE, 2); *a = simd_extract(b, LANE as u32); @@ -12423,6 +13146,7 @@ pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { static_assert_uimm_bits!(LANE, 1); *a = simd_extract(b, LANE as u32); @@ -12438,6 +13162,7 @@ pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_lane_s64(a: *mut i64, b: int64x1_t) { static_assert!(LANE == 0); *a = simd_extract(b, LANE as u32); @@ -12453,6 +13178,7 @@ pub unsafe fn vst1_lane_s64(a: *mut i64, b: int64x1_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_lane_s8(a: *mut i8, b: 
int8x16_t) { static_assert_uimm_bits!(LANE, 4); *a = simd_extract(b, LANE as u32); @@ -12468,6 +13194,7 @@ pub unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { static_assert_uimm_bits!(LANE, 3); *a = simd_extract(b, LANE as u32); @@ -12483,6 +13210,7 @@ pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { static_assert_uimm_bits!(LANE, 2); *a = simd_extract(b, LANE as u32); @@ -12498,6 +13226,7 @@ pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { static_assert_uimm_bits!(LANE, 1); *a = simd_extract(b, LANE as u32); @@ -12513,6 +13242,7 @@ pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { static_assert_uimm_bits!(LANE, 3); *a = simd_extract(b, LANE as u32); @@ -12528,6 +13258,7 @@ pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { static_assert_uimm_bits!(LANE, 2); *a = simd_extract(b, LANE as u32); @@ -12543,6 +13274,7 @@ pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { static_assert_uimm_bits!(LANE, 1); *a = simd_extract(b, LANE as u32); @@ -12558,6 +13290,7 @@ pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t) { static_assert!(LANE == 0); *a = simd_extract(b, LANE as u32); @@ -12573,6 +13306,7 @@ pub unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { static_assert_uimm_bits!(LANE, 4); *a = simd_extract(b, LANE as u32); @@ -12588,6 +13322,7 @@ pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { static_assert_uimm_bits!(LANE, 3); *a = simd_extract(b, LANE as u32); @@ -12603,6 +13338,7 @@ pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { static_assert_uimm_bits!(LANE, 2); *a = simd_extract(b, LANE as u32); @@ -12618,6 +13354,7 @@ pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { static_assert_uimm_bits!(LANE, 1); *a = simd_extract(b, LANE as u32); @@ -12633,6 +13370,7 @@ pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { static_assert_uimm_bits!(LANE, 3); *a = simd_extract(b, LANE as u32); @@ -12648,6 +13386,7 @@ pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { static_assert_uimm_bits!(LANE, 2); *a = simd_extract(b, LANE as u32); @@ -12663,6 +13402,7 @@ pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { static_assert_uimm_bits!(LANE, 4); *a = simd_extract(b, LANE as u32); @@ -12678,6 +13418,7 @@ pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { static_assert_uimm_bits!(LANE, 3); *a = simd_extract(b, LANE as u32); @@ -12693,6 +13434,7 @@ pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_lane_p64(a: *mut p64, b: poly64x1_t) { static_assert!(LANE == 0); *a = simd_extract(b, LANE as u32); @@ -12708,6 +13450,7 @@ pub unsafe fn vst1_lane_p64(a: *mut p64, b: poly64x1_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { static_assert_uimm_bits!(LANE, 1); *a = simd_extract(b, LANE as u32); @@ -12723,6 +13466,7 @@ pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { static_assert_uimm_bits!(LANE, 1); *a = simd_extract(b, LANE as u32); @@ -12738,6 +13482,7 @@ pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_lane_f32(a: *mut f32, b: float32x4_t) { static_assert_uimm_bits!(LANE, 2); *a = simd_extract(b, LANE as u32); @@ -12750,6 +13495,7 @@ pub unsafe fn vst1q_lane_f32(a: *mut 
f32, b: float32x4_t) { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -12783,6 +13529,7 @@ vst1_s8_x2_(b.0, b.1, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -12816,6 +13563,7 @@ vst1_s16_x2_(b.0, b.1, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -12849,6 +13597,7 @@ vst1_s32_x2_(b.0, b.1, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -12882,6 +13631,7 @@ vst1_s64_x2_(b.0, b.1, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -12915,6 +13665,7 @@ vst1q_s8_x2_(b.0, b.1, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -12948,6 +13699,7 @@ vst1q_s16_x2_(b.0, 
b.1, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -12981,6 +13733,7 @@ vst1q_s32_x2_(b.0, b.1, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13014,6 +13767,7 @@ vst1q_s64_x2_(b.0, b.1, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13047,6 +13801,7 @@ vst1_s8_x3_(b.0, b.1, b.2, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13080,6 +13835,7 @@ vst1_s16_x3_(b.0, b.1, b.2, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13113,6 +13869,7 @@ vst1_s32_x3_(b.0, b.1, b.2, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13146,6 +13903,7 @@ 
vst1_s64_x3_(b.0, b.1, b.2, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13179,6 +13937,7 @@ vst1q_s8_x3_(b.0, b.1, b.2, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13212,6 +13971,7 @@ vst1q_s16_x3_(b.0, b.1, b.2, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13245,6 +14005,7 @@ vst1q_s32_x3_(b.0, b.1, b.2, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13278,6 +14039,7 @@ vst1q_s64_x3_(b.0, b.1, b.2, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13311,6 +14073,7 @@ vst1_s8_x4_(b.0, b.1, b.2, b.3, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { 
@@ -13344,6 +14107,7 @@ vst1_s16_x4_(b.0, b.1, b.2, b.3, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13377,6 +14141,7 @@ vst1_s32_x4_(b.0, b.1, b.2, b.3, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13410,6 +14175,7 @@ vst1_s64_x4_(b.0, b.1, b.2, b.3, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13443,6 +14209,7 @@ vst1q_s8_x4_(b.0, b.1, b.2, b.3, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13476,6 +14243,7 @@ vst1q_s16_x4_(b.0, b.1, b.2, b.3, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -13509,6 +14277,7 @@ vst1q_s32_x4_(b.0, b.1, b.2, b.3, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) 
{ #[allow(improper_ctypes)] extern "unadjusted" { @@ -13544,6 +14313,7 @@ vst1q_s64_x4_(b.0, b.1, b.2, b.3, a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { vst1_s8_x2(transmute(a), transmute(b)) } @@ -13557,6 +14327,7 @@ pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { vst1_s16_x2(transmute(a), transmute(b)) } @@ -13570,6 +14341,7 @@ pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { vst1_s32_x2(transmute(a), transmute(b)) } @@ -13583,6 +14355,7 @@ pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_u64_x2(a: *mut u64, b: 
uint64x1x2_t) { vst1_s64_x2(transmute(a), transmute(b)) } @@ -13596,6 +14369,7 @@ pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { vst1q_s8_x2(transmute(a), transmute(b)) } @@ -13609,6 +14383,7 @@ pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { vst1q_s16_x2(transmute(a), transmute(b)) } @@ -13622,6 +14397,7 @@ pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { vst1q_s32_x2(transmute(a), transmute(b)) } @@ -13635,6 +14411,7 @@ pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] 
pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { vst1q_s64_x2(transmute(a), transmute(b)) } @@ -13648,6 +14425,7 @@ pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { vst1_s8_x3(transmute(a), transmute(b)) } @@ -13661,6 +14439,7 @@ pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { vst1_s16_x3(transmute(a), transmute(b)) } @@ -13674,6 +14453,7 @@ pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { vst1_s32_x3(transmute(a), transmute(b)) } @@ -13687,6 +14467,7 @@ pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { vst1_s64_x3(transmute(a), transmute(b)) } @@ -13700,6 +14481,7 @@ pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { vst1q_s8_x3(transmute(a), transmute(b)) } @@ -13713,6 +14495,7 @@ pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { vst1q_s16_x3(transmute(a), transmute(b)) } @@ -13726,6 +14509,7 @@ pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { vst1q_s32_x3(transmute(a), transmute(b)) } @@ -13739,6 +14523,7 @@ pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { vst1q_s64_x3(transmute(a), transmute(b)) } @@ -13752,6 +14537,7 @@ pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { vst1_s8_x4(transmute(a), transmute(b)) } @@ -13765,6 +14551,7 @@ pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { vst1_s16_x4(transmute(a), transmute(b)) } @@ -13778,6 +14565,7 @@ pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { vst1_s32_x4(transmute(a), transmute(b)) } @@ -13791,6 +14579,7 @@ pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { vst1_s64_x4(transmute(a), transmute(b)) } @@ -13804,6 +14593,7 @@ pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { vst1q_s8_x4(transmute(a), transmute(b)) } @@ -13817,6 +14607,7 @@ pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { vst1q_s16_x4(transmute(a), transmute(b)) } @@ -13830,6 +14621,7 @@ pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { vst1q_s32_x4(transmute(a), transmute(b)) } @@ -13843,6 +14635,7 @@ pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { vst1q_s64_x4(transmute(a), transmute(b)) } @@ -13856,6 +14649,7 @@ pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { vst1_s8_x2(transmute(a), transmute(b)) } @@ -13869,6 +14663,7 @@ pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { vst1_s8_x3(transmute(a), transmute(b)) } @@ -13882,6 +14677,7 @@ pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { vst1_s8_x4(transmute(a), transmute(b)) } @@ -13895,6 +14691,7 @@ pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { vst1q_s8_x2(transmute(a), transmute(b)) } @@ -13908,6 +14705,7 @@ pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { vst1q_s8_x3(transmute(a), transmute(b)) } @@ -13921,6 +14719,7 @@ pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { vst1q_s8_x4(transmute(a), transmute(b)) } @@ -13934,6 +14733,7 @@ pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { vst1_s16_x2(transmute(a), transmute(b)) } @@ -13947,6 +14747,7 @@ pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { vst1_s16_x3(transmute(a), transmute(b)) } @@ -13960,6 +14761,7 @@ pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { vst1_s16_x4(transmute(a), transmute(b)) } @@ -13973,6 +14775,7 @@ pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { vst1q_s16_x2(transmute(a), transmute(b)) } @@ -13986,6 +14789,7 @@ pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { vst1q_s16_x3(transmute(a), transmute(b)) } @@ -13999,6 +14803,7 @@ pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { vst1q_s16_x4(transmute(a), transmute(b)) } @@ -14012,6 +14817,7 @@ pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) { vst1_s64_x2(transmute(a), transmute(b)) } @@ -14025,6 +14831,7 @@ pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) { vst1_s64_x3(transmute(a), transmute(b)) } @@ -14038,6 +14845,7 @@ pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) { vst1_s64_x4(transmute(a), transmute(b)) } @@ -14051,6 +14859,7 @@ pub unsafe fn vst1_p64_x4(a: *mut p64, b: 
poly64x1x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { vst1q_s64_x2(transmute(a), transmute(b)) } @@ -14064,6 +14873,7 @@ pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { vst1q_s64_x3(transmute(a), transmute(b)) } @@ -14077,6 +14887,7 @@ pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { vst1q_s64_x4(transmute(a), transmute(b)) } @@ -14088,6 +14899,7 @@ pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14121,6 +14933,7 @@ vst1_f32_x2_(b.0, b.1, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] 
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14154,6 +14967,7 @@ vst1q_f32_x2_(b.0, b.1, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14187,6 +15001,7 @@ vst1_f32_x3_(b.0, b.1, b.2, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14220,6 +15035,7 @@ vst1q_f32_x3_(b.0, b.1, b.2, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14253,6 +15069,7 @@ vst1_f32_x4_(b.0, b.1, b.2, b.3, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14286,6 +15103,7 @@ vst1q_f32_x4_(b.0, b.1, b.2, b.3, a) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14319,6 +15137,7 @@ vst2_s8_(b.0, b.1, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] 
#[cfg_attr(test, assert_instr(vst2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14352,6 +15171,7 @@ vst2_s16_(b.0, b.1, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14385,6 +15205,7 @@ vst2_s32_(b.0, b.1, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14418,6 +15239,7 @@ vst2q_s8_(b.0, b.1, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14451,6 +15273,7 @@ vst2q_s16_(b.0, b.1, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14484,6 +15307,7 @@ vst2q_s32_(b.0, b.1, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(nop))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14519,6 +15343,7 @@ vst2_s64_(b.0, b.1, a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { transmute(vst2_s8(transmute(a), transmute(b))) } @@ -14532,6 +15357,7 @@ pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { transmute(vst2_s16(transmute(a), transmute(b))) } @@ -14545,6 +15371,7 @@ pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { transmute(vst2_s32(transmute(a), transmute(b))) } @@ -14558,6 +15385,7 @@ pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { transmute(vst2q_s8(transmute(a), transmute(b))) } @@ -14571,6 +15399,7 @@ pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { transmute(vst2q_s16(transmute(a), transmute(b))) } @@ -14584,6 +15413,7 @@ pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { transmute(vst2q_s32(transmute(a), transmute(b))) } @@ -14597,6 +15427,7 @@ pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { transmute(vst2_s8(transmute(a), transmute(b))) } @@ -14610,6 +15441,7 @@ pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { transmute(vst2_s16(transmute(a), transmute(b))) } @@ -14623,6 +15455,7 @@ pub unsafe fn vst2_p16(a: *mut 
p16, b: poly16x4x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { transmute(vst2q_s8(transmute(a), transmute(b))) } @@ -14636,6 +15469,7 @@ pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { transmute(vst2q_s16(transmute(a), transmute(b))) } @@ -14649,6 +15483,7 @@ pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { transmute(vst2_s64(transmute(a), transmute(b))) } @@ -14662,6 +15497,7 @@ pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { transmute(vst2_s64(transmute(a), transmute(b))) } @@ -14673,6 
+15509,7 @@ pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14706,6 +15543,7 @@ vst2_f32_(b.0, b.1, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -14740,6 +15578,7 @@ vst2q_f32_(b.0, b.1, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] @@ -14777,6 +15616,7 @@ vst2_lane_s8_(b.0, b.1, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -14814,6 +15654,7 @@ vst2_lane_s16_(b.0, b.1, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] @@ -14851,6 +15692,7 @@ vst2_lane_s32_(b.0, b.1, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] 
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] @@ -14888,6 +15730,7 @@ vst2q_lane_s16_(b.0, b.1, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -14927,6 +15770,7 @@ vst2q_lane_s32_(b.0, b.1, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { static_assert_uimm_bits!(LANE, 3); transmute(vst2_lane_s8::(transmute(a), transmute(b))) @@ -14942,6 +15786,7 @@ pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { static_assert_uimm_bits!(LANE, 2); transmute(vst2_lane_s16::(transmute(a), transmute(b))) @@ -14957,6 +15802,7 @@ pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { static_assert_uimm_bits!(LANE, 1); transmute(vst2_lane_s32::(transmute(a), transmute(b))) @@ -14972,6 +15818,7 @@ pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { static_assert_uimm_bits!(LANE, 3); transmute(vst2q_lane_s16::(transmute(a), transmute(b))) @@ -14987,6 +15834,7 @@ pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { static_assert_uimm_bits!(LANE, 2); transmute(vst2q_lane_s32::(transmute(a), transmute(b))) @@ -15002,6 +15850,7 @@ pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { static_assert_uimm_bits!(LANE, 3); transmute(vst2_lane_s8::(transmute(a), transmute(b))) @@ -15017,6 +15866,7 @@ pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] 
#[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { static_assert_uimm_bits!(LANE, 2); transmute(vst2_lane_s16::(transmute(a), transmute(b))) @@ -15032,6 +15882,7 @@ pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { static_assert_uimm_bits!(LANE, 3); transmute(vst2q_lane_s16::(transmute(a), transmute(b))) @@ -15045,6 +15896,7 @@ pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] @@ -15082,6 +15934,7 @@ vst2_lane_f32_(b.0, b.1, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -15118,6 +15971,7 @@ vst2q_lane_f32_(b.0, b.1, LANE as i64, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_s8(a: *mut i8, b: 
int8x8x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15151,6 +16005,7 @@ vst3_s8_(b.0, b.1, b.2, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15184,6 +16039,7 @@ vst3_s16_(b.0, b.1, b.2, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15217,6 +16073,7 @@ vst3_s32_(b.0, b.1, b.2, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15250,6 +16107,7 @@ vst3q_s8_(b.0, b.1, b.2, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15283,6 +16141,7 @@ vst3q_s16_(b.0, b.1, b.2, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15316,6 +16175,7 @@ vst3q_s32_(b.0, b.1, b.2, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(nop))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_s64(a: 
*mut i64, b: int64x1x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15351,6 +16211,7 @@ vst3_s64_(b.0, b.1, b.2, a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { transmute(vst3_s8(transmute(a), transmute(b))) } @@ -15364,6 +16225,7 @@ pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { transmute(vst3_s16(transmute(a), transmute(b))) } @@ -15377,6 +16239,7 @@ pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { transmute(vst3_s32(transmute(a), transmute(b))) } @@ -15390,6 +16253,7 @@ pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn 
vst3q_u8(a: *mut u8, b: uint8x16x3_t) { transmute(vst3q_s8(transmute(a), transmute(b))) } @@ -15403,6 +16267,7 @@ pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { transmute(vst3q_s16(transmute(a), transmute(b))) } @@ -15416,6 +16281,7 @@ pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { transmute(vst3q_s32(transmute(a), transmute(b))) } @@ -15429,6 +16295,7 @@ pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { transmute(vst3_s8(transmute(a), transmute(b))) } @@ -15442,6 +16309,7 @@ pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { transmute(vst3_s16(transmute(a), transmute(b))) } @@ -15455,6 +16323,7 @@ pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { transmute(vst3q_s8(transmute(a), transmute(b))) } @@ -15468,6 +16337,7 @@ pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { transmute(vst3q_s16(transmute(a), transmute(b))) } @@ -15481,6 +16351,7 @@ pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) { transmute(vst3_s64(transmute(a), transmute(b))) } @@ -15494,6 +16365,7 @@ pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) { transmute(vst3_s64(transmute(a), transmute(b))) } @@ -15505,6 +16377,7 @@ pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15538,6 +16411,7 @@ vst3_f32_(b.0, b.1, b.2, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15572,6 +16446,7 @@ vst3q_f32_(b.0, b.1, b.2, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] @@ -15609,6 +16484,7 @@ vst3_lane_s8_(b.0, b.1, b.2, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -15646,6 +16522,7 @@ vst3_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { 
static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] @@ -15683,6 +16560,7 @@ vst3_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] @@ -15720,6 +16598,7 @@ vst3q_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -15759,6 +16638,7 @@ vst3q_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { static_assert_uimm_bits!(LANE, 3); transmute(vst3_lane_s8::(transmute(a), transmute(b))) @@ -15774,6 +16654,7 @@ pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { static_assert_uimm_bits!(LANE, 2); transmute(vst3_lane_s16::(transmute(a), transmute(b))) @@ -15789,6 +16670,7 @@ pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { static_assert_uimm_bits!(LANE, 1); transmute(vst3_lane_s32::(transmute(a), transmute(b))) @@ -15804,6 +16686,7 @@ pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { static_assert_uimm_bits!(LANE, 3); transmute(vst3q_lane_s16::(transmute(a), transmute(b))) @@ -15819,6 +16702,7 @@ pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { static_assert_uimm_bits!(LANE, 2); transmute(vst3q_lane_s32::(transmute(a), transmute(b))) @@ -15834,6 +16718,7 @@ pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { 
static_assert_uimm_bits!(LANE, 3); transmute(vst3_lane_s8::(transmute(a), transmute(b))) @@ -15849,6 +16734,7 @@ pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { static_assert_uimm_bits!(LANE, 2); transmute(vst3_lane_s16::(transmute(a), transmute(b))) @@ -15864,6 +16750,7 @@ pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { static_assert_uimm_bits!(LANE, 3); transmute(vst3q_lane_s16::(transmute(a), transmute(b))) @@ -15877,6 +16764,7 @@ pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] @@ -15914,6 +16802,7 @@ vst3_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -15950,6 +16839,7 @@ 
vst3q_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -15983,6 +16873,7 @@ vst4_s8_(b.0, b.1, b.2, b.3, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16016,6 +16907,7 @@ vst4_s16_(b.0, b.1, b.2, b.3, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16049,6 +16941,7 @@ vst4_s32_(b.0, b.1, b.2, b.3, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16082,6 +16975,7 @@ vst4q_s8_(b.0, b.1, b.2, b.3, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16115,6 +17009,7 @@ vst4q_s16_(b.0, b.1, b.2, b.3, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { 
#[allow(improper_ctypes)] extern "unadjusted" { @@ -16148,6 +17043,7 @@ vst4q_s32_(b.0, b.1, b.2, b.3, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(nop))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16183,6 +17079,7 @@ vst4_s64_(b.0, b.1, b.2, b.3, a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { transmute(vst4_s8(transmute(a), transmute(b))) } @@ -16196,6 +17093,7 @@ pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { transmute(vst4_s16(transmute(a), transmute(b))) } @@ -16209,6 +17107,7 @@ pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { transmute(vst4_s32(transmute(a), transmute(b))) } @@ -16222,6 +17121,7 @@ pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { transmute(vst4q_s8(transmute(a), transmute(b))) } @@ -16235,6 +17135,7 @@ pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { transmute(vst4q_s16(transmute(a), transmute(b))) } @@ -16248,6 +17149,7 @@ pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { transmute(vst4q_s32(transmute(a), transmute(b))) } @@ -16261,6 +17163,7 @@ pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { transmute(vst4_s8(transmute(a), transmute(b))) } @@ -16274,6 +17177,7 @@ pub unsafe fn vst4_p8(a: *mut 
p8, b: poly8x8x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { transmute(vst4_s16(transmute(a), transmute(b))) } @@ -16287,6 +17191,7 @@ pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { transmute(vst4q_s8(transmute(a), transmute(b))) } @@ -16300,6 +17205,7 @@ pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { transmute(vst4q_s16(transmute(a), transmute(b))) } @@ -16313,6 +17219,7 @@ pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) { transmute(vst4_s64(transmute(a), transmute(b))) } @@ -16326,6 
+17233,7 @@ pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) { transmute(vst4_s64(transmute(a), transmute(b))) } @@ -16337,6 +17245,7 @@ pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) { #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16370,6 +17279,7 @@ vst4_f32_(b.0, b.1, b.2, b.3, a as _) #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4))] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16404,6 +17314,7 @@ vst4q_f32_(b.0, b.1, b.2, b.3, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] @@ -16441,6 +17352,7 @@ vst4_lane_s8_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -16478,6 +17390,7 @@ 
vst4_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] @@ -16515,6 +17428,7 @@ vst4_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { static_assert_uimm_bits!(LANE, 3); #[allow(improper_ctypes)] @@ -16552,6 +17466,7 @@ vst4q_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -16591,6 +17506,7 @@ vst4q_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { static_assert_uimm_bits!(LANE, 3); transmute(vst4_lane_s8::(transmute(a), transmute(b))) @@ -16606,6 +17522,7 @@ pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = 
"arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { static_assert_uimm_bits!(LANE, 2); transmute(vst4_lane_s16::(transmute(a), transmute(b))) @@ -16621,6 +17538,7 @@ pub unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { static_assert_uimm_bits!(LANE, 1); transmute(vst4_lane_s32::(transmute(a), transmute(b))) @@ -16636,6 +17554,7 @@ pub unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { static_assert_uimm_bits!(LANE, 3); transmute(vst4q_lane_s16::(transmute(a), transmute(b))) @@ -16651,6 +17570,7 @@ pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t) { static_assert_uimm_bits!(LANE, 2); transmute(vst4q_lane_s32::(transmute(a), transmute(b))) @@ -16666,6 +17586,7 @@ pub unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t) { #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { static_assert_uimm_bits!(LANE, 3); transmute(vst4_lane_s8::(transmute(a), transmute(b))) @@ -16681,6 +17602,7 @@ pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { static_assert_uimm_bits!(LANE, 2); transmute(vst4_lane_s16::(transmute(a), transmute(b))) @@ -16696,6 +17618,7 @@ pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { static_assert_uimm_bits!(LANE, 3); transmute(vst4q_lane_s16::(transmute(a), transmute(b))) @@ -16709,6 +17632,7 @@ pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { static_assert_uimm_bits!(LANE, 1); #[allow(improper_ctypes)] @@ -16746,6 +17670,7 @@ vst4_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[target_feature(enable = 
"neon,v7")] #[cfg_attr(test, assert_instr(vst4, LANE = 0))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { static_assert_uimm_bits!(LANE, 2); #[allow(improper_ctypes)] @@ -16783,6 +17708,8 @@ vst4q_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usdot))] +#[cfg_attr(not(target_arch = "arm"), unstable(feature = "stdarch_neon_i8mm", issue = "117223"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16801,6 +17728,8 @@ vusdot_s32_(a, b, c) #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usdot))] +#[cfg_attr(not(target_arch = "arm"), unstable(feature = "stdarch_neon_i8mm", issue = "117223"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16820,6 +17749,8 @@ vusdotq_s32_(a, b, c) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usdot, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(not(target_arch = "arm"), unstable(feature = "stdarch_neon_i8mm", issue = "117223"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vusdot_lane_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { 
static_assert_uimm_bits!(LANE, 1); let c: int32x2_t = transmute(c); @@ -16836,6 +17767,8 @@ pub unsafe fn vusdot_lane_s32(a: int32x2_t, b: uint8x8_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usdot, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(not(target_arch = "arm"), unstable(feature = "stdarch_neon_i8mm", issue = "117223"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vusdotq_lane_s32(a: int32x4_t, b: uint8x16_t, c: int8x8_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 1); let c: int32x2_t = transmute(c); @@ -16852,6 +17785,8 @@ pub unsafe fn vusdotq_lane_s32(a: int32x4_t, b: uint8x16_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sudot, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(not(target_arch = "arm"), unstable(feature = "stdarch_neon_i8mm", issue = "117223"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsudot_lane_s32(a: int32x2_t, b: int8x8_t, c: uint8x8_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 1); let c: uint32x2_t = transmute(c); @@ -16868,6 +17803,8 @@ pub unsafe fn vsudot_lane_s32(a: int32x2_t, b: int8x8_t, c: uin #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sudot, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(not(target_arch = "arm"), unstable(feature = "stdarch_neon_i8mm", issue = "117223"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsudotq_lane_s32(a: int32x4_t, b: int8x16_t, c: uint8x8_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 1); let c: uint32x2_t = transmute(c); @@ -16884,6 +17821,7 @@ pub 
unsafe fn vsudotq_lane_s32(a: int32x4_t, b: int8x16_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_mul(a, b) } @@ -16897,6 +17835,7 @@ pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_mul(a, b) } @@ -16910,6 +17849,7 @@ pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_mul(a, b) } @@ -16923,6 +17863,7 @@ pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_mul(a, b) } @@ -16936,6 
+17877,7 @@ pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_mul(a, b) } @@ -16949,6 +17891,7 @@ pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_mul(a, b) } @@ -16962,6 +17905,7 @@ pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_mul(a, b) } @@ -16975,6 +17919,7 @@ pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { 
simd_mul(a, b) } @@ -16988,6 +17933,7 @@ pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_mul(a, b) } @@ -17001,6 +17947,7 @@ pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_mul(a, b) } @@ -17014,6 +17961,7 @@ pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_mul(a, b) } @@ -17027,6 +17975,7 @@ pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn 
vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_mul(a, b) } @@ -17040,6 +17989,7 @@ pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17059,6 +18009,7 @@ vmul_p8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17078,6 +18029,7 @@ vmulq_p8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_mul(a, b) } @@ -17091,6 +18043,7 @@ pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn 
vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_mul(a, b) } @@ -17104,6 +18057,7 @@ pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { simd_mul(a, vdup_n_s16(b)) } @@ -17117,6 +18071,7 @@ pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { simd_mul(a, vdupq_n_s16(b)) } @@ -17130,6 +18085,7 @@ pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { simd_mul(a, vdup_n_s32(b)) } @@ -17143,6 +18099,7 @@ pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800"))] pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { simd_mul(a, vdupq_n_s32(b)) } @@ -17156,6 +18113,7 @@ pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { simd_mul(a, vdup_n_u16(b)) } @@ -17169,6 +18127,7 @@ pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { simd_mul(a, vdupq_n_u16(b)) } @@ -17182,6 +18141,7 @@ pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { simd_mul(a, vdup_n_u32(b)) } @@ -17195,6 +18155,7 @@ pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { simd_mul(a, vdupq_n_u32(b)) } @@ -17208,6 +18169,7 @@ pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { simd_mul(a, vdup_n_f32(b)) } @@ -17221,6 +18183,7 @@ pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { simd_mul(a, vdupq_n_f32(b)) } @@ -17235,6 +18198,7 @@ pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17250,6 +18214,7 @@ pub unsafe fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int1 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { static_assert_uimm_bits!(LANE, 3); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17265,6 +18230,7 @@ pub unsafe fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17280,6 +18246,7 @@ pub unsafe fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(LANE, 3); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17295,6 +18262,7 @@ pub unsafe fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> in #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) @@ -17310,6 +18278,7 @@ pub unsafe fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int3 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) @@ -17325,6 +18294,7 @@ pub unsafe fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17340,6 +18310,7 @@ pub unsafe fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17355,6 +18326,7 
@@ pub unsafe fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> in #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17370,6 +18342,7 @@ pub unsafe fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> ui #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t { static_assert_uimm_bits!(LANE, 3); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17385,6 +18358,7 @@ pub unsafe fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t { static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17400,6 +18374,7 @@ pub unsafe fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] 
#[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { static_assert_uimm_bits!(LANE, 3); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17415,6 +18390,7 @@ pub unsafe fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) @@ -17430,6 +18406,7 @@ pub unsafe fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> ui #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t { static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) @@ -17445,6 +18422,7 @@ pub unsafe fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800"))] pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17460,6 +18438,7 @@ pub unsafe fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17475,6 +18454,7 @@ pub unsafe fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) @@ -17490,6 +18470,7 @@ pub unsafe fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t { static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) @@ -17505,6 +18486,7 @@ pub unsafe fn vmul_laneq_f32(a: 
float32x2_t, b: float32x4_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t { static_assert_uimm_bits!(LANE, 1); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17520,6 +18502,7 @@ pub unsafe fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { static_assert_uimm_bits!(LANE, 2); simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17534,6 +18517,7 @@ pub unsafe fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) - #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17553,6 +18537,7 @@ vmull_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17572,6 +18557,7 @@ vmull_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17591,6 +18577,7 @@ vmull_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17610,6 +18597,7 @@ vmull_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17629,6 +18617,7 @@ vmull_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800"))] pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17648,6 +18637,7 @@ vmull_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.p8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmull))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17667,6 +18657,7 @@ vmull_p8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { vmull_s16(a, vdup_n_s16(b)) } @@ -17680,6 +18671,7 @@ pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { vmull_s32(a, vdup_n_s32(b)) } @@ -17693,6 +18685,7 @@ pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800"))] pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { vmull_u16(a, vdup_n_u16(b)) } @@ -17706,6 +18699,7 @@ pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { vmull_u32(a, vdup_n_u32(b)) } @@ -17720,6 +18714,7 @@ pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); vmull_s16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17735,6 +18730,7 @@ pub unsafe fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 3); vmull_s16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17750,6 +18746,7 @@ pub unsafe fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> in #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))] 
#[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { static_assert_uimm_bits!(LANE, 1); vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) @@ -17765,6 +18762,7 @@ pub unsafe fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t { static_assert_uimm_bits!(LANE, 2); vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) @@ -17780,6 +18778,7 @@ pub unsafe fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> in #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 2); vmull_u16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17795,6 +18794,7 @@ pub unsafe fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_laneq_u16(a: 
uint16x4_t, b: uint16x8_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 3); vmull_u16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17810,6 +18810,7 @@ pub unsafe fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { static_assert_uimm_bits!(LANE, 1); vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) @@ -17825,6 +18826,7 @@ pub unsafe fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> u #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint64x2_t { static_assert_uimm_bits!(LANE, 2); vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) @@ -17839,6 +18841,7 @@ pub unsafe fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17858,6 +18861,7 @@ vfma_f32_(b, c, a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr(all(test, target_arch = 
"aarch64"), assert_instr(fmla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -17877,6 +18881,7 @@ vfmaq_f32_(b, c, a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vfma_f32(a, b, vdup_n_f32_vfp4(c)) } @@ -17890,6 +18895,7 @@ pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vfmaq_f32(a, b, vdupq_n_f32_vfp4(c)) } @@ -17903,6 +18909,7 @@ pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { let b: float32x2_t = simd_neg(b); vfma_f32(a, b, c) @@ -17917,6 
+18924,7 @@ pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { let b: float32x4_t = simd_neg(b); vfmaq_f32(a, b, c) @@ -17931,6 +18939,7 @@ pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vfms_f32(a, b, vdup_n_f32_vfp4(c)) } @@ -17944,6 +18953,7 @@ pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vfmsq_f32(a, b, vdupq_n_f32_vfp4(c)) } @@ -17957,6 +18967,7 @@ pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_sub(a, b) } @@ -17970,6 +18981,7 @@ pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_sub(a, b) } @@ -17983,6 +18995,7 @@ pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_sub(a, b) } @@ -17996,6 +19009,7 @@ pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_sub(a, b) } @@ -18009,6 +19023,7 @@ pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_sub(a, b) } @@ -18022,6 +19037,7 @@ pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_sub(a, b) } @@ -18035,6 +19051,7 @@ pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_sub(a, b) } @@ -18048,6 +19065,7 @@ pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_sub(a, b) } @@ -18061,6 +19079,7 @@ pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_sub(a, b) } @@ -18074,6 +19093,7 @@ pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_sub(a, b) } @@ -18087,6 +19107,7 @@ pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_sub(a, b) } @@ -18100,6 +19121,7 @@ pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_sub(a, b) } @@ -18113,6 +19135,7 @@ pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_sub(a, b) } @@ -18126,6 +19149,7 @@ pub unsafe fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_sub(a, b) } @@ -18139,6 +19163,7 @@ pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_sub(a, b) } @@ -18152,6 +19177,7 @@ pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_sub(a, b) } @@ -18165,6 +19191,7 @@ pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vsub.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_sub(a, b) } @@ -18178,6 +19205,7 @@ pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_sub(a, b) } @@ -18191,6 +19219,7 @@ pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { simd_xor(a, b) } @@ -18204,6 +19233,7 @@ pub unsafe fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { simd_xor(a, b) } @@ -18217,6 +19247,7 @@ pub unsafe fn vadd_p16(a: poly16x4_t, b: 
poly16x4_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { simd_xor(a, b) } @@ -18230,6 +19261,7 @@ pub unsafe fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_xor(a, b) } @@ -18243,6 +19275,7 @@ pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { simd_xor(a, b) } @@ -18256,6 +19289,7 @@ pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_xor(a, b) } @@ -18269,6 +19303,7 @@ pub unsafe fn 
vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 { a ^ b } @@ -18282,6 +19317,7 @@ pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { let c: i16x8 = i16x8::new(8, 8, 8, 8, 8, 8, 8, 8); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18296,6 +19332,7 @@ pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { let c: i32x4 = i32x4::new(16, 16, 16, 16); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18310,6 +19347,7 @@ pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { let c: i64x2 = i64x2::new(32, 32); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18324,6 +19362,7 @@ pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { let c: u16x8 = u16x8::new(8, 8, 8, 8, 8, 8, 8, 8); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18338,6 +19377,7 @@ pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { let c: u32x4 = u32x4::new(16, 16, 16, 16); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18352,6 +19392,7 @@ pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { let c: u64x2 = u64x2::new(32, 32); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18366,6 +19407,7 @@ pub 
unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { let d: int8x8_t = vsubhn_s16(b, c); simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -18380,6 +19422,7 @@ pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { let d: int16x4_t = vsubhn_s32(b, c); simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -18394,6 +19437,7 @@ pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { let d: int32x2_t = vsubhn_s64(b, c); simd_shuffle!(a, d, [0, 1, 2, 3]) @@ -18408,6 +19452,7 @@ pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(subhn2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { let d: uint8x8_t = vsubhn_u16(b, c); simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -18422,6 +19467,7 @@ pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uin #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { let d: uint16x4_t = vsubhn_u32(b, c); simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -18436,6 +19482,7 @@ pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { let d: uint32x2_t = vsubhn_u64(b, c); simd_shuffle!(a, d, [0, 1, 2, 3]) @@ -18450,6 +19497,7 @@ pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18469,6 +19517,7 @@ vhsub_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18488,6 +19537,7 @@ vhsubq_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18507,6 +19557,7 @@ vhsub_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18526,6 +19577,7 @@ vhsubq_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch 
= "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18545,6 +19597,7 @@ vhsub_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18564,6 +19617,7 @@ vhsubq_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18583,6 +19637,7 @@ vhsub_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18602,6 +19657,7 @@ vhsubq_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18621,6 +19677,7 @@ vhsub_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18640,6 +19697,7 @@ vhsubq_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18659,6 +19717,7 @@ vhsub_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18678,6 +19737,7 @@ vhsubq_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubw))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800"))] pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { simd_sub(a, simd_cast(b)) } @@ -18691,6 +19751,7 @@ pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubw))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { simd_sub(a, simd_cast(b)) } @@ -18704,6 +19765,7 @@ pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubw))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { simd_sub(a, simd_cast(b)) } @@ -18717,6 +19779,7 @@ pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubw))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { simd_sub(a, simd_cast(b)) } @@ -18730,6 +19793,7 @@ pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubw))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { simd_sub(a, simd_cast(b)) } @@ -18743,6 +19807,7 @@ pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubw))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { simd_sub(a, simd_cast(b)) } @@ -18756,6 +19821,7 @@ pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { let c: int16x8_t = simd_cast(a); let d: int16x8_t = simd_cast(b); @@ -18771,6 +19837,7 @@ pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { let c: int32x4_t = simd_cast(a); let d: int32x4_t = simd_cast(b); @@ -18786,6 +19853,7 @@ pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { let c: int64x2_t = simd_cast(a); let d: int64x2_t = simd_cast(b); @@ -18801,6 +19869,7 @@ pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { let c: uint16x8_t = simd_cast(a); let d: uint16x8_t = simd_cast(b); @@ -18816,6 +19885,7 @@ pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { let c: uint32x4_t = simd_cast(a); let d: uint32x4_t = simd_cast(b); @@ -18831,6 +19901,7 @@ pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> 
uint64x2_t { let c: uint64x2_t = simd_cast(a); let d: uint64x2_t = simd_cast(b); @@ -18845,6 +19916,8 @@ pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sdot))] +#[cfg_attr(not(target_arch = "arm"), unstable(feature = "stdarch_neon_dotprod", issue = "117224"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18863,6 +19936,8 @@ vdot_s32_(a, b, c) #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sdot))] +#[cfg_attr(not(target_arch = "arm"), unstable(feature = "stdarch_neon_dotprod", issue = "117224"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18881,6 +19956,8 @@ vdotq_s32_(a, b, c) #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(udot))] +#[cfg_attr(not(target_arch = "arm"), unstable(feature = "stdarch_neon_dotprod", issue = "117224"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18899,6 +19976,8 @@ vdot_u32_(a, b, c) #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vudot))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(udot))] +#[cfg_attr(not(target_arch = "arm"), unstable(feature = "stdarch_neon_dotprod", issue = "117224"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18918,6 +19997,8 @@ vdotq_u32_(a, b, c) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sdot, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(not(target_arch = "arm"), unstable(feature = "stdarch_neon_dotprod", issue = "117224"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 1); let c: int32x2_t = transmute(c); @@ -18934,6 +20015,8 @@ pub unsafe fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sdot, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(not(target_arch = "arm"), unstable(feature = "stdarch_neon_dotprod", issue = "117224"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdotq_lane_s32(a: int32x4_t, b: int8x16_t, c: int8x8_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 1); let c: int32x2_t = transmute(c); @@ -18950,6 +20033,8 @@ pub unsafe fn vdotq_lane_s32(a: int32x4_t, b: int8x16_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(udot, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(not(target_arch = "arm"), unstable(feature = 
"stdarch_neon_dotprod", issue = "117224"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdot_lane_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { static_assert_uimm_bits!(LANE, 1); let c: uint32x2_t = transmute(c); @@ -18966,6 +20051,8 @@ pub unsafe fn vdot_lane_u32(a: uint32x2_t, b: uint8x8_t, c: uin #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(udot, LANE = 0))] #[rustc_legacy_const_generics(3)] +#[cfg_attr(not(target_arch = "arm"), unstable(feature = "stdarch_neon_dotprod", issue = "117224"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vdotq_lane_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x8_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 1); let c: uint32x2_t = transmute(c); @@ -18982,6 +20069,7 @@ pub unsafe fn vdotq_lane_u32(a: uint32x4_t, b: uint8x16_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19001,6 +20089,7 @@ vmax_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19020,6 +20109,7 @@ vmaxq_s8_(a, b) 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19039,6 +20129,7 @@ vmax_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19058,6 +20149,7 @@ vmaxq_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19077,6 +20169,7 @@ vmax_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19096,6 +20189,7 @@ vmaxq_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19115,6 +20209,7 @@ vmax_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19134,6 +20229,7 @@ vmaxq_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19153,6 +20249,7 @@ vmax_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19172,6 +20269,7 @@ vmaxq_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(umax))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19191,6 +20289,7 @@ vmax_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19210,6 +20309,7 @@ vmaxq_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmax))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19229,6 +20329,7 @@ vmax_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmax))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19248,6 +20349,7 @@ vmaxq_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmaxnm))] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19267,6 +20369,7 @@ vmaxnm_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmaxnm))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19286,6 +20389,7 @@ vmaxnmq_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19305,6 +20409,7 @@ vmin_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19324,6 +20429,7 @@ vminq_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] #[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19343,6 +20449,7 @@ vmin_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19362,6 +20469,7 @@ vminq_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19381,6 +20489,7 @@ vmin_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19400,6 +20509,7 @@ vminq_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19419,6 +20529,7 @@ vmin_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19438,6 +20549,7 @@ vminq_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19457,6 +20569,7 @@ vmin_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19476,6 +20589,7 @@ vminq_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19495,6 +20609,7 @@ vmin_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19514,6 +20629,7 @@ vminq_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmin))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19533,6 +20649,7 @@ vmin_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmin))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19552,6 +20669,7 @@ vminq_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fminnm))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800"))] pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19571,6 +20689,7 @@ vminnm_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fminnm))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19590,6 +20709,7 @@ vminnmq_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(faddp))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19609,6 +20729,7 @@ vpadd_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19628,6 +20749,7 @@ vqdmull_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub 
unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19647,6 +20769,7 @@ vqdmull_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { vqdmull_s16(a, vdup_n_s16(b)) } @@ -19660,6 +20783,7 @@ pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { vqdmull_s32(a, vdup_n_s32(b)) } @@ -19674,6 +20798,7 @@ pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { static_assert_uimm_bits!(N, 2); let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]); @@ -19690,6 +20815,7 @@ pub unsafe fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int3 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull, N = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { static_assert_uimm_bits!(N, 1); let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]); @@ -19705,6 +20831,7 @@ pub unsafe fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int6 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { vqaddq_s32(a, vqdmull_s16(b, c)) } @@ -19718,6 +20845,7 @@ pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { vqaddq_s64(a, vqdmull_s32(b, c)) } @@ -19731,6 +20859,7 @@ pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vqaddq_s32(a, vqdmull_n_s16(b, c)) } @@ -19744,6 +20873,7 @@ pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: 
int16x4_t, c: i16) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vqaddq_s64(a, vqdmull_n_s32(b, c)) } @@ -19758,6 +20888,7 @@ pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal, N = 2))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { static_assert_uimm_bits!(N, 2); vqaddq_s32(a, vqdmull_lane_s16::(b, c)) @@ -19773,6 +20904,7 @@ pub unsafe fn vqdmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int1 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal, N = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { static_assert_uimm_bits!(N, 1); vqaddq_s64(a, vqdmull_lane_s32::(b, c)) @@ -19787,6 +20919,7 @@ pub unsafe fn vqdmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int3 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { vqsubq_s32(a, vqdmull_s16(b, c)) } @@ -19800,6 +20933,7 @@ pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { vqsubq_s64(a, vqdmull_s32(b, c)) } @@ -19813,6 +20947,7 @@ pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vqsubq_s32(a, vqdmull_n_s16(b, c)) } @@ -19826,6 +20961,7 @@ pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vqsubq_s64(a, vqdmull_n_s32(b, c)) } @@ -19840,6 +20976,7 @@ pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(sqdmlsl, N = 2))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { static_assert_uimm_bits!(N, 2); vqsubq_s32(a, vqdmull_lane_s16::(b, c)) @@ -19855,6 +20992,7 @@ pub unsafe fn vqdmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int1 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl, N = 1))] #[rustc_legacy_const_generics(3)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { static_assert_uimm_bits!(N, 1); vqsubq_s64(a, vqdmull_lane_s32::(b, c)) @@ -19869,6 +21007,7 @@ pub unsafe fn vqdmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int3 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19888,6 +21027,7 @@ vqdmulh_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] 
extern "unadjusted" { @@ -19907,6 +21047,7 @@ vqdmulhq_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19926,6 +21067,7 @@ vqdmulh_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19945,6 +21087,7 @@ vqdmulhq_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { let b: int16x4_t = vdup_n_s16(b); vqdmulh_s16(a, b) @@ -19959,6 +21102,7 @@ pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { let 
b: int32x2_t = vdup_n_s32(b); vqdmulh_s32(a, b) @@ -19973,6 +21117,7 @@ pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { let b: int16x8_t = vdupq_n_s16(b); vqdmulhq_s16(a, b) @@ -19987,6 +21132,7 @@ pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { let b: int32x4_t = vdupq_n_s32(b); vqdmulhq_s32(a, b) @@ -20002,6 +21148,7 @@ pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(LANE, 3); vqdmulhq_s16(a, vdupq_n_s16(simd_extract(b, LANE as u32))) @@ -20017,6 +21164,7 @@ pub unsafe fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { static_assert_uimm_bits!(LANE, 3); vqdmulh_s16(a, vdup_n_s16(simd_extract(b, LANE as u32))) @@ -20032,6 +21180,7 @@ pub unsafe fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); vqdmulhq_s32(a, vdupq_n_s32(simd_extract(b, LANE as u32))) @@ -20047,6 +21196,7 @@ pub unsafe fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 2); vqdmulh_s32(a, vdup_n_s32(simd_extract(b, LANE as u32))) @@ -20061,6 +21211,7 @@ pub unsafe fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20080,6 +21231,7 @@ vqmovn_s16_(a) #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20099,6 +21251,7 @@ vqmovn_s32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20118,6 +21271,7 @@ vqmovn_s64_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20137,6 +21291,7 @@ vqmovn_u16_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20156,6 +21311,7 @@ vqmovn_u32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(uqxtn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20175,6 +21331,7 @@ vqmovn_u64_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20194,6 +21351,7 @@ vqmovun_s16_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20213,6 +21371,7 @@ vqmovun_s32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20232,6 +21391,7 @@ vqmovun_s64_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] #[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20251,6 +21411,7 @@ vqrdmulh_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20270,6 +21431,7 @@ vqrdmulhq_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20289,6 +21451,7 @@ vqrdmulh_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20308,6 +21471,7 @@ vqrdmulhq_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] #[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { vqrdmulh_s16(a, vdup_n_s16(b)) } @@ -20321,6 +21485,7 @@ pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { vqrdmulhq_s16(a, vdupq_n_s16(b)) } @@ -20334,6 +21499,7 @@ pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { vqrdmulh_s32(a, vdup_n_s32(b)) } @@ -20347,6 +21513,7 @@ pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { vqrdmulhq_s32(a, vdupq_n_s32(b)) } @@ -20361,6 +21528,7 @@ pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] 
#[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_uimm_bits!(LANE, 2); let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -20377,6 +21545,7 @@ pub unsafe fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t { static_assert_uimm_bits!(LANE, 3); let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -20393,6 +21562,7 @@ pub unsafe fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t { static_assert_uimm_bits!(LANE, 2); let b: int16x8_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -20409,6 +21579,7 @@ pub unsafe fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(LANE, 3); let b: int16x8_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -20425,6 +21596,7 @@ pub unsafe fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) - #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 1); let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); @@ -20441,6 +21613,7 @@ pub unsafe fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 2); let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]); @@ -20457,6 +21630,7 @@ pub unsafe fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t { 
static_assert_uimm_bits!(LANE, 1); let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -20473,6 +21647,7 @@ pub unsafe fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -20488,6 +21663,7 @@ pub unsafe fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) - #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20507,6 +21683,7 @@ vqrshl_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20526,6 +21703,7 @@ vqrshlq_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20545,6 +21723,7 @@ vqrshl_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20564,6 +21743,7 @@ vqrshlq_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20583,6 +21763,7 @@ vqrshl_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20602,6 +21783,7 @@ vqrshlq_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20621,6 +21803,7 @@ vqrshl_s64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20640,6 +21823,7 @@ vqrshlq_s64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20659,6 +21843,7 @@ vqrshl_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20678,6 +21863,7 @@ vqrshlq_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = 
"arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20697,6 +21883,7 @@ vqrshl_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20716,6 +21903,7 @@ vqrshlq_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20735,6 +21923,7 @@ vqrshl_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20754,6 +21943,7 @@ vqrshlq_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20773,6 +21963,7 @@ vqrshl_u64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20791,6 +21982,7 @@ vqrshlq_u64_(a, b) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] @@ -20828,6 +22020,7 @@ vqrshrn_n_s16_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] @@ -20865,6 +22058,7 @@ vqrshrn_n_s32_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -20902,6 +22096,7 @@ vqrshrn_n_s64_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_u16(a: 
uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] @@ -20939,6 +22134,7 @@ vqrshrn_n_u16_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] @@ -20976,6 +22172,7 @@ vqrshrn_n_u32_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -21013,6 +22210,7 @@ vqrshrn_n_u64_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] @@ -21050,6 +22248,7 @@ vqrshrun_n_s16_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] @@ -21087,6 +22286,7 @@ vqrshrun_n_s32_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -21125,6 +22325,7 @@ vqrshrun_n_s64_(a, N) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21144,6 +22345,7 @@ vqshl_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21163,6 +22365,7 @@ vqshlq_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21182,6 +22385,7 @@ vqshl_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21201,6 +22405,7 @@ vqshlq_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(sqshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21220,6 +22425,7 @@ vqshl_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21239,6 +22445,7 @@ vqshlq_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21258,6 +22465,7 @@ vqshl_s64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21277,6 +22485,7 @@ vqshlq_s64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] #[cfg_attr(not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21296,6 +22505,7 @@ vqshl_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21315,6 +22525,7 @@ vqshlq_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21334,6 +22545,7 @@ vqshl_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21353,6 +22565,7 @@ vqshlq_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since 
= "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21372,6 +22585,7 @@ vqshl_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21391,6 +22605,7 @@ vqshlq_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21410,6 +22625,7 @@ vqshl_u64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21430,6 +22646,7 @@ vqshlq_u64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { static_assert_uimm_bits!(N, 3); vqshl_s8(a, vdup_n_s8(N as _)) @@ -21445,6 +22662,7 @@ pub unsafe fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { static_assert_uimm_bits!(N, 3); vqshlq_s8(a, vdupq_n_s8(N as _)) @@ -21460,6 +22678,7 @@ pub unsafe fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { static_assert_uimm_bits!(N, 4); vqshl_s16(a, vdup_n_s16(N as _)) @@ -21475,6 +22694,7 @@ pub unsafe fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(N, 4); vqshlq_s16(a, vdupq_n_s16(N as _)) @@ -21490,6 +22710,7 @@ pub unsafe fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { static_assert_uimm_bits!(N, 5); vqshl_s32(a, vdup_n_s32(N as _)) @@ -21505,6 +22726,7 @@ pub unsafe fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { static_assert_uimm_bits!(N, 5); vqshlq_s32(a, vdupq_n_s32(N as _)) @@ -21520,6 +22742,7 @@ pub unsafe fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshl_n_s64(a: int64x1_t) -> int64x1_t { static_assert_uimm_bits!(N, 6); vqshl_s64(a, vdup_n_s64(N as _)) @@ -21535,6 +22758,7 @@ pub unsafe fn vqshl_n_s64(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { static_assert_uimm_bits!(N, 6); vqshlq_s64(a, vdupq_n_s64(N as _)) @@ -21550,6 +22774,7 @@ pub unsafe fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { static_assert_uimm_bits!(N, 3); vqshl_u8(a, vdup_n_s8(N as _)) @@ -21565,6 +22790,7 @@ pub unsafe fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t { static_assert_uimm_bits!(N, 3); vqshlq_u8(a, vdupq_n_s8(N as _)) @@ -21580,6 +22806,7 @@ pub unsafe fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { static_assert_uimm_bits!(N, 4); vqshl_u16(a, vdup_n_s16(N as _)) @@ -21595,6 +22822,7 @@ pub unsafe fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { static_assert_uimm_bits!(N, 4); vqshlq_u16(a, vdupq_n_s16(N as _)) @@ -21610,6 +22838,7 @@ pub unsafe fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { static_assert_uimm_bits!(N, 5); vqshl_u32(a, vdup_n_s32(N as _)) @@ -21625,6 +22854,7 @@ pub unsafe fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { static_assert_uimm_bits!(N, 5); vqshlq_u32(a, vdupq_n_s32(N as _)) @@ -21640,6 +22870,7 @@ pub unsafe fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshl_n_u64(a: uint64x1_t) -> uint64x1_t { static_assert_uimm_bits!(N, 6); vqshl_u64(a, vdup_n_s64(N as _)) @@ -21655,6 +22886,7 @@ pub unsafe fn vqshl_n_u64(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { static_assert_uimm_bits!(N, 6); vqshlq_u64(a, vdupq_n_s64(N as _)) @@ -21668,6 +22900,7 @@ pub unsafe fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] 
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { static_assert_uimm_bits!(N, 3); #[allow(improper_ctypes)] @@ -21705,6 +22938,7 @@ vqshlu_n_s8_(a, int8x8_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { static_assert_uimm_bits!(N, 4); #[allow(improper_ctypes)] @@ -21742,6 +22976,7 @@ vqshlu_n_s16_(a, int16x4_t(N as i16, N as i16, N as i16, N as i16)) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { static_assert_uimm_bits!(N, 5); #[allow(improper_ctypes)] @@ -21779,6 +23014,7 @@ vqshlu_n_s32_(a, int32x2_t(N as i32, N as i32)) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { static_assert_uimm_bits!(N, 6); #[allow(improper_ctypes)] @@ -21816,6 +23052,7 @@ vqshlu_n_s64_(a, int64x1_t(N as i64)) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { static_assert_uimm_bits!(N, 3); #[allow(improper_ctypes)] @@ -21853,6 +23090,7 @@ vqshluq_n_s8_(a, int8x16_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { static_assert_uimm_bits!(N, 4); #[allow(improper_ctypes)] @@ -21890,6 +23128,7 @@ vqshluq_n_s16_(a, int16x8_t(N as i16, N as i16, N as i16, N as i16, N as i16, N #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { static_assert_uimm_bits!(N, 5); #[allow(improper_ctypes)] @@ -21927,6 +23166,7 @@ vqshluq_n_s32_(a, int32x4_t(N as i32, N as i32, N as i32, N as i32)) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshlu, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { static_assert_uimm_bits!(N, 6); #[allow(improper_ctypes)] @@ -21964,6 +23204,7 @@ vqshluq_n_s64_(a, int64x2_t(N as i64, N as i64)) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] @@ -22001,6 +23242,7 @@ vqshrn_n_s16_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] @@ -22038,6 +23280,7 @@ vqshrn_n_s32_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { 
static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -22075,6 +23318,7 @@ vqshrn_n_s64_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] @@ -22112,6 +23356,7 @@ vqshrn_n_u16_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] @@ -22149,6 +23394,7 @@ vqshrn_n_u32_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -22186,6 +23432,7 @@ vqshrn_n_u64_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] @@ -22223,6 +23470,7 @@ vqshrun_n_s16_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrun, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] @@ -22260,6 +23508,7 @@ vqshrun_n_s32_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrun, N = 2))] #[rustc_legacy_const_generics(1)] 
+#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -22298,6 +23547,7 @@ vqshrun_n_s64_(a, N) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrte))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22317,6 +23567,7 @@ vrsqrte_f32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrte))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22336,6 +23587,7 @@ vrsqrteq_f32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursqrte))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22355,6 +23607,7 @@ vrsqrte_u32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursqrte))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue 
= "111800"))] pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22374,6 +23627,7 @@ vrsqrteq_u32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrts))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22393,6 +23647,7 @@ vrsqrts_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrts))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22412,6 +23667,7 @@ vrsqrtsq_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecpe))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22431,6 +23687,7 @@ vrecpe_f32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecpe))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn 
vrecpeq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22450,6 +23707,7 @@ vrecpeq_f32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urecpe))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22469,6 +23727,7 @@ vrecpe_u32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urecpe))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22488,6 +23747,7 @@ vrecpeq_u32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecps))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22507,6 +23767,7 @@ vrecps_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecps))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { 
#[allow(improper_ctypes)] extern "unadjusted" { @@ -22526,6 +23787,7 @@ vrecpsq_f32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { transmute(a) } @@ -22539,6 +23801,7 @@ pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { transmute(a) } @@ -22552,6 +23815,7 @@ pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { transmute(a) } @@ -22565,6 +23829,7 @@ pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { transmute(a) } @@ -22578,6 +23843,7 @@ pub 
unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { transmute(a) } @@ -22591,6 +23857,7 @@ pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { transmute(a) } @@ -22604,6 +23871,7 @@ pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { transmute(a) } @@ -22617,6 +23885,7 @@ pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { transmute(a) } @@ -22630,6 +23899,7 @@ pub unsafe fn 
vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { transmute(a) } @@ -22643,6 +23913,7 @@ pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { transmute(a) } @@ -22656,6 +23927,7 @@ pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { transmute(a) } @@ -22669,6 +23941,7 @@ pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { transmute(a) } @@ -22682,6 +23955,7 @@ pub unsafe fn 
vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { transmute(a) } @@ -22695,6 +23969,7 @@ pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { transmute(a) } @@ -22708,6 +23983,7 @@ pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { transmute(a) } @@ -22721,6 +23997,7 @@ pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { transmute(a) } @@ -22734,6 +24011,7 @@ pub unsafe fn vreinterpret_u16_s16(a: 
int16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { transmute(a) } @@ -22747,6 +24025,7 @@ pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { transmute(a) } @@ -22760,6 +24039,7 @@ pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { transmute(a) } @@ -22773,6 +24053,7 @@ pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { transmute(a) } @@ -22786,6 +24067,7 @@ pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> 
uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { transmute(a) } @@ -22799,6 +24081,7 @@ pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { transmute(a) } @@ -22812,6 +24095,7 @@ pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { transmute(a) } @@ -22825,6 +24109,7 @@ pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { transmute(a) } @@ -22838,6 +24123,7 @@ pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> 
uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { transmute(a) } @@ -22851,6 +24137,7 @@ pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { transmute(a) } @@ -22864,6 +24151,7 @@ pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { transmute(a) } @@ -22877,6 +24165,7 @@ pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { transmute(a) } @@ -22890,6 +24179,7 @@ pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { transmute(a) } @@ -22903,6 +24193,7 @@ pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { transmute(a) } @@ -22916,6 +24207,7 @@ pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { transmute(a) } @@ -22929,6 +24221,7 @@ pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { transmute(a) } @@ -22942,6 +24235,7 @@ pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { transmute(a) } @@ -22955,6 +24249,7 @@ pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { transmute(a) } @@ -22968,6 +24263,7 @@ pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { transmute(a) } @@ -22981,6 +24277,7 @@ pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { transmute(a) } @@ -22994,6 +24291,7 @@ pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { transmute(a) } @@ -23007,6 +24305,7 @@ pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { transmute(a) } @@ -23020,6 +24319,7 @@ pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { transmute(a) } @@ -23033,6 +24333,7 @@ pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { transmute(a) } @@ -23046,6 +24347,7 @@ pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { transmute(a) } @@ -23059,6 +24361,7 @@ pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { transmute(a) } @@ -23072,6 +24375,7 @@ pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { transmute(a) } @@ -23085,6 +24389,7 @@ pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { transmute(a) } @@ -23098,6 +24403,7 @@ pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { transmute(a) } @@ -23111,6 +24417,7 @@ pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { transmute(a) } @@ -23124,6 +24431,7 @@ pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { transmute(a) } @@ -23137,6 +24445,7 @@ pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { transmute(a) } @@ -23150,6 +24459,7 @@ pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { transmute(a) } @@ -23163,6 +24473,7 @@ pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { transmute(a) } @@ -23176,6 +24487,7 @@ pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { transmute(a) } @@ -23189,6 +24501,7 @@ pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { transmute(a) } @@ -23202,6 +24515,7 @@ pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { transmute(a) } @@ -23215,6 +24529,7 @@ pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { transmute(a) } @@ -23228,6 +24543,7 @@ pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { transmute(a) } @@ -23241,6 +24557,7 @@ pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { transmute(a) } @@ -23254,6 +24571,7 @@ pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { transmute(a) } @@ -23267,6 +24585,7 @@ pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { transmute(a) } @@ -23280,6 +24599,7 @@ pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { transmute(a) } @@ -23293,6 +24613,7 @@ pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { transmute(a) } @@ -23306,6 +24627,7 @@ pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { transmute(a) } @@ -23319,6 +24641,7 @@ pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { transmute(a) } @@ -23332,6 +24655,7 @@ pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { transmute(a) } @@ -23345,6 +24669,7 @@ pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { transmute(a) } @@ -23358,6 +24683,7 @@ pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { transmute(a) } @@ -23371,6 +24697,7 @@ pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { transmute(a) } @@ -23384,6 +24711,7 @@ pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { transmute(a) } @@ -23397,6 +24725,7 @@ pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { transmute(a) } @@ -23410,6 +24739,7 @@ pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = 
"aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { transmute(a) } @@ -23423,6 +24753,7 @@ pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { transmute(a) } @@ -23436,6 +24767,7 @@ pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { transmute(a) } @@ -23449,6 +24781,7 @@ pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { transmute(a) } @@ -23462,6 +24795,7 @@ pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { transmute(a) } @@ -23475,6 +24809,7 @@ pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { transmute(a) } @@ -23488,6 +24823,7 @@ pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { transmute(a) } @@ -23501,6 +24837,7 @@ pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { transmute(a) } @@ -23514,6 +24851,7 @@ pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { transmute(a) } @@ -23527,6 +24865,7 @@ pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { transmute(a) } @@ -23540,6 +24879,7 @@ pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { transmute(a) } @@ -23553,6 +24893,7 @@ pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { transmute(a) } @@ -23566,6 +24907,7 @@ pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { transmute(a) } @@ -23579,6 +24921,7 @@ pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { transmute(a) } @@ -23592,6 +24935,7 @@ pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { transmute(a) } @@ -23605,6 +24949,7 @@ pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { transmute(a) } @@ -23618,6 +24963,7 @@ pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { transmute(a) } @@ -23631,6 +24977,7 @@ pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { transmute(a) } @@ -23644,6 +24991,7 @@ pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { transmute(a) } @@ -23657,6 +25005,7 @@ pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { transmute(a) } @@ -23670,6 +25019,7 @@ pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", 
since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { transmute(a) } @@ -23683,6 +25033,7 @@ pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { transmute(a) } @@ -23696,6 +25047,7 @@ pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { transmute(a) } @@ -23709,6 +25061,7 @@ pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { transmute(a) } @@ -23722,6 +25075,7 @@ pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { transmute(a) } @@ -23735,6 +25089,7 @@ pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { transmute(a) } @@ -23748,6 +25103,7 @@ pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { transmute(a) } @@ -23761,6 +25117,7 @@ pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { transmute(a) } @@ -23774,6 +25131,7 @@ pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { transmute(a) } @@ -23787,6 +25145,7 @@ pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { transmute(a) } @@ -23800,6 +25159,7 @@ pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { transmute(a) } @@ -23813,6 +25173,7 @@ pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { transmute(a) } @@ -23826,6 +25187,7 @@ pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { transmute(a) } @@ -23839,6 +25201,7 @@ pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { transmute(a) } @@ -23852,6 +25215,7 @@ pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { transmute(a) } @@ -23865,6 +25229,7 @@ pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { transmute(a) } @@ -23878,6 +25243,7 @@ pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { transmute(a) } @@ -23891,6 +25257,7 @@ pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { transmute(a) } @@ -23904,6 +25271,7 @@ pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { transmute(a) } @@ -23917,6 +25285,7 @@ pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { transmute(a) } @@ -23930,6 +25299,7 @@ pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { transmute(a) } @@ -23943,6 +25313,7 @@ pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { transmute(a) } @@ -23956,6 +25327,7 @@ pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { transmute(a) } @@ -23969,6 +25341,7 @@ pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { transmute(a) } @@ -23982,6 +25355,7 @@ pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch 
= "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { transmute(a) } @@ -23995,6 +25369,7 @@ pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { transmute(a) } @@ -24008,6 +25383,7 @@ pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { transmute(a) } @@ -24021,6 +25397,7 @@ pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { transmute(a) } @@ -24034,6 +25411,7 @@ pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { transmute(a) } @@ -24047,6 +25425,7 @@ pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { transmute(a) } @@ -24060,6 +25439,7 @@ pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { transmute(a) } @@ -24073,6 +25453,7 @@ pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { transmute(a) } @@ -24086,6 +25467,7 @@ pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { transmute(a) } @@ -24099,6 +25481,7 @@ pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { transmute(a) } @@ -24112,6 +25495,7 @@ pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { transmute(a) } @@ -24125,6 +25509,7 @@ pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { transmute(a) } @@ -24138,6 +25523,7 @@ pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800"))] pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { transmute(a) } @@ -24151,6 +25537,7 @@ pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { transmute(a) } @@ -24164,6 +25551,7 @@ pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { transmute(a) } @@ -24177,6 +25565,7 @@ pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { transmute(a) } @@ -24190,6 +25579,7 @@ pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe 
fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { transmute(a) } @@ -24203,6 +25593,7 @@ pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { transmute(a) } @@ -24216,6 +25607,7 @@ pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { transmute(a) } @@ -24229,6 +25621,7 @@ pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { transmute(a) } @@ -24242,6 +25635,7 @@ pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn 
vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { transmute(a) } @@ -24255,6 +25649,7 @@ pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { transmute(a) } @@ -24268,6 +25663,7 @@ pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { transmute(a) } @@ -24281,6 +25677,7 @@ pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { transmute(a) } @@ -24294,6 +25691,7 @@ pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn 
vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { transmute(a) } @@ -24307,6 +25705,7 @@ pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { transmute(a) } @@ -24320,6 +25719,7 @@ pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { transmute(a) } @@ -24333,6 +25733,7 @@ pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { transmute(a) } @@ -24346,6 +25747,7 @@ pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn 
vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { transmute(a) } @@ -24359,6 +25761,7 @@ pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { transmute(a) } @@ -24372,6 +25775,7 @@ pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { transmute(a) } @@ -24385,6 +25789,7 @@ pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { transmute(a) } @@ -24398,6 +25803,7 @@ pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn 
vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { transmute(a) } @@ -24411,6 +25817,7 @@ pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { transmute(a) } @@ -24424,6 +25831,7 @@ pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { transmute(a) } @@ -24437,6 +25845,7 @@ pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { transmute(a) } @@ -24450,6 +25859,7 @@ pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn 
vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { transmute(a) } @@ -24463,6 +25873,7 @@ pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { transmute(a) } @@ -24476,6 +25887,7 @@ pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { transmute(a) } @@ -24489,6 +25901,7 @@ pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { transmute(a) } @@ -24502,6 +25915,7 @@ pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn 
vreinterpretq_s32_p128(a: p128) -> int32x4_t { transmute(a) } @@ -24515,6 +25929,7 @@ pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { transmute(a) } @@ -24528,6 +25943,7 @@ pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { transmute(a) } @@ -24541,6 +25957,7 @@ pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { transmute(a) } @@ -24554,6 +25971,7 @@ pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> 
int32x2_t { transmute(a) } @@ -24567,6 +25985,7 @@ pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { transmute(a) } @@ -24580,6 +25999,7 @@ pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { transmute(a) } @@ -24593,6 +26013,7 @@ pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { transmute(a) } @@ -24606,6 +26027,7 @@ pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { 
transmute(a) } @@ -24619,6 +26041,7 @@ pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { transmute(a) } @@ -24632,6 +26055,7 @@ pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { transmute(a) } @@ -24645,6 +26069,7 @@ pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { transmute(a) } @@ -24658,6 +26083,7 @@ pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { transmute(a) } @@ 
-24671,6 +26097,7 @@ pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { transmute(a) } @@ -24684,6 +26111,7 @@ pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { transmute(a) } @@ -24697,6 +26125,7 @@ pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { transmute(a) } @@ -24710,6 +26139,7 @@ pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { transmute(a) } @@ -24723,6 +26153,7 @@ pub 
unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { transmute(a) } @@ -24736,6 +26167,7 @@ pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { transmute(a) } @@ -24749,6 +26181,7 @@ pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { transmute(a) } @@ -24762,6 +26195,7 @@ pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { transmute(a) } @@ -24775,6 +26209,7 @@ pub unsafe fn 
vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { transmute(a) } @@ -24788,6 +26223,7 @@ pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { transmute(a) } @@ -24801,6 +26237,7 @@ pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { transmute(a) } @@ -24814,6 +26251,7 @@ pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { transmute(a) } @@ -24827,6 +26265,7 @@ pub unsafe fn 
vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { transmute(a) } @@ -24840,6 +26279,7 @@ pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { transmute(a) } @@ -24853,6 +26293,7 @@ pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { transmute(a) } @@ -24866,6 +26307,7 @@ pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { transmute(a) } @@ -24879,6 +26321,7 @@ pub unsafe fn 
vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { transmute(a) } @@ -24892,6 +26335,7 @@ pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { transmute(a) } @@ -24905,6 +26349,7 @@ pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { transmute(a) } @@ -24918,6 +26363,7 @@ pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { transmute(a) } @@ -24931,6 +26377,7 @@ pub unsafe fn 
vreinterpretq_p128_s32(a: int32x4_t) -> p128 { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { transmute(a) } @@ -24944,6 +26391,7 @@ pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { transmute(a) } @@ -24957,6 +26405,7 @@ pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { transmute(a) } @@ -24970,6 +26419,7 @@ pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { transmute(a) } @@ -24983,6 +26433,7 @@ pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> 
uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { transmute(a) } @@ -24996,6 +26447,7 @@ pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { transmute(a) } @@ -25009,6 +26461,7 @@ pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { transmute(a) } @@ -25022,6 +26475,7 @@ pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { transmute(a) } @@ -25035,6 +26489,7 @@ pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { transmute(a) } @@ -25048,6 +26503,7 @@ pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { transmute(a) } @@ -25061,6 +26517,7 @@ pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { transmute(a) } @@ -25074,6 +26531,7 @@ pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { transmute(a) } @@ -25087,6 +26545,7 @@ pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { transmute(a) } @@ -25100,6 +26559,7 @@ pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { transmute(a) } @@ -25113,6 +26573,7 @@ pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { transmute(a) } @@ -25126,6 +26587,7 @@ pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { transmute(a) } @@ -25139,6 +26601,7 @@ pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { transmute(a) } @@ -25152,6 +26615,7 @@ pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { transmute(a) } @@ -25165,6 +26629,7 @@ pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { transmute(a) } @@ -25178,6 +26643,7 @@ pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { transmute(a) } @@ -25191,6 +26657,7 @@ pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { transmute(a) } @@ -25204,6 +26671,7 @@ pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { transmute(a) } @@ -25217,6 +26685,7 @@ pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { transmute(a) } @@ -25230,6 +26699,7 @@ pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { transmute(a) } @@ -25243,6 +26713,7 @@ pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { transmute(a) } @@ -25256,6 +26727,7 @@ pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { transmute(a) } @@ -25269,6 +26741,7 @@ pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { transmute(a) } @@ -25282,6 +26755,7 @@ pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { transmute(a) } @@ -25295,6 +26769,7 @@ pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { transmute(a) } @@ -25308,6 +26783,7 @@ pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { transmute(a) } @@ -25321,6 +26797,7 @@ pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { transmute(a) } @@ -25334,6 +26811,7 @@ pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { transmute(a) } @@ -25347,6 +26825,7 @@ pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { transmute(a) } @@ -25360,6 +26839,7 @@ pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { transmute(a) } @@ -25373,6 +26853,7 @@ pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { transmute(a) } @@ -25386,6 +26867,7 @@ pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { transmute(a) } @@ -25399,6 +26881,7 @@ pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { transmute(a) } @@ -25412,6 +26895,7 @@ pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { transmute(a) } @@ -25425,6 +26909,7 @@ pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { transmute(a) } @@ -25438,6 +26923,7 @@ pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { transmute(a) } @@ -25451,6 +26937,7 @@ pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { transmute(a) } @@ -25464,6 +26951,7 @@ pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { transmute(a) } @@ -25477,6 +26965,7 @@ pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { transmute(a) } @@ -25490,6 +26979,7 @@ pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { transmute(a) } @@ -25503,6 +26993,7 @@ pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { transmute(a) } @@ -25516,6 +27007,7 @@ pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { transmute(a) } @@ -25529,6 +27021,7 @@ pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { transmute(a) } @@ -25542,6 +27035,7 @@ pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { transmute(a) } @@ -25555,6 +27049,7 @@ pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { transmute(a) } @@ -25568,6 +27063,7 @@ pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { transmute(a) } @@ -25581,6 +27077,7 @@ pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { transmute(a) } @@ -25594,6 +27091,7 @@ pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { transmute(a) } @@ -25607,6 +27105,7 @@ pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { transmute(a) } @@ -25620,6 +27119,7 @@ pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { transmute(a) } @@ -25633,6 +27133,7 @@ pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { transmute(a) } @@ -25646,6 +27147,7 @@ pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { transmute(a) } @@ -25659,6 +27161,7 @@ pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { transmute(a) } @@ -25672,6 +27175,7 @@ pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { transmute(a) } @@ -25685,6 +27189,7 @@ pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { transmute(a) } @@ -25698,6 +27203,7 @@ pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { transmute(a) } @@ -25711,6 +27217,7 @@ pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { transmute(a) } @@ -25724,6 +27231,7 @@ pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { transmute(a) } @@ -25737,6 +27245,7 @@ pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { transmute(a) } @@ -25750,6 +27259,7 @@ pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { transmute(a) } @@ -25763,6 +27273,7 @@ pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { transmute(a) } @@ -25776,6 +27287,7 @@ pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { transmute(a) } @@ -25789,6 +27301,7 @@ pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { transmute(a) } @@ -25802,6 +27315,7 @@ pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { transmute(a) } @@ -25815,6 +27329,7 @@ pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { transmute(a) } @@ -25828,6 +27343,7 @@ pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { transmute(a) } @@ -25841,6 +27357,7 @@ pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { transmute(a) } @@ -25854,6 +27371,7 @@ pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { transmute(a) } @@ -25867,6 +27385,7 @@ pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { transmute(a) } @@ -25880,6 +27399,7 @@ pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { transmute(a) } @@ -25893,6 +27413,7 @@ pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { transmute(a) } @@ -25906,6 +27427,7 @@ pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { transmute(a) } @@ -25919,6 +27441,7 @@ pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { transmute(a) } @@ -25932,6 +27455,7 @@ pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { transmute(a) } @@ -25945,6 +27469,7 @@ pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { transmute(a) } @@ -25958,6 +27483,7 @@ pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { transmute(a) } @@ -25971,6 +27497,7 @@ pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { transmute(a) } @@ -25984,6 +27511,7 @@ pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { transmute(a) } @@ -25997,6 +27525,7 @@ pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { transmute(a) } @@ -26010,6 +27539,7 @@ pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { transmute(a) } @@ -26023,6 +27553,7 @@ pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { transmute(a) } @@ -26036,6 +27567,7 @@ pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { transmute(a) } @@ -26049,6 +27581,7 @@ pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { transmute(a) } @@ -26062,6 +27595,7 @@ pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { transmute(a) } @@ -26075,6 +27609,7 @@ pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { transmute(a) } @@ -26088,6 +27623,7 @@ pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { transmute(a) } @@ -26101,6 +27637,7 @@ pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { transmute(a) } @@ -26114,6 +27651,7 @@ pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26133,6 +27671,7 @@ vrshl_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch 
= "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26152,6 +27691,7 @@ vrshlq_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26171,6 +27711,7 @@ vrshl_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26190,6 +27731,7 @@ vrshlq_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26209,6 +27751,7 @@ vrshl_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26228,6 +27771,7 @@ vrshlq_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26247,6 +27791,7 @@ vrshl_s64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26266,6 +27811,7 @@ vrshlq_s64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26285,6 +27831,7 @@ vrshl_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub 
unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26304,6 +27851,7 @@ vrshlq_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26323,6 +27871,7 @@ vrshl_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26342,6 +27891,7 @@ vrshlq_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26361,6 +27911,7 @@ vrshl_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshlq_u32(a: uint32x4_t, b: 
int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26380,6 +27931,7 @@ vrshlq_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26399,6 +27951,7 @@ vrshl_u64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26419,6 +27972,7 @@ vrshlq_u64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); vrshl_s8(a, vdup_n_s8(-N as _)) @@ -26434,6 +27988,7 @@ pub unsafe fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { static_assert!(N >= 1 && N <= 8); 
vrshlq_s8(a, vdupq_n_s8(-N as _)) @@ -26449,6 +28004,7 @@ pub unsafe fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); vrshl_s16(a, vdup_n_s16(-N as _)) @@ -26464,6 +28020,7 @@ pub unsafe fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { static_assert!(N >= 1 && N <= 16); vrshlq_s16(a, vdupq_n_s16(-N as _)) @@ -26479,6 +28036,7 @@ pub unsafe fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); vrshl_s32(a, vdup_n_s32(-N as _)) @@ -26494,6 +28052,7 @@ pub unsafe fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshrq_n_s32(a: int32x4_t) -> 
int32x4_t { static_assert!(N >= 1 && N <= 32); vrshlq_s32(a, vdupq_n_s32(-N as _)) @@ -26509,6 +28068,7 @@ pub unsafe fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshr_n_s64(a: int64x1_t) -> int64x1_t { static_assert!(N >= 1 && N <= 64); vrshl_s64(a, vdup_n_s64(-N as _)) @@ -26524,6 +28084,7 @@ pub unsafe fn vrshr_n_s64(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { static_assert!(N >= 1 && N <= 64); vrshlq_s64(a, vdupq_n_s64(-N as _)) @@ -26539,6 +28100,7 @@ pub unsafe fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); vrshl_u8(a, vdup_n_s8(-N as _)) @@ -26554,6 +28116,7 @@ pub unsafe fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub 
unsafe fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { static_assert!(N >= 1 && N <= 8); vrshlq_u8(a, vdupq_n_s8(-N as _)) @@ -26569,6 +28132,7 @@ pub unsafe fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); vrshl_u16(a, vdup_n_s16(-N as _)) @@ -26584,6 +28148,7 @@ pub unsafe fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { static_assert!(N >= 1 && N <= 16); vrshlq_u16(a, vdupq_n_s16(-N as _)) @@ -26599,6 +28164,7 @@ pub unsafe fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); vrshl_u32(a, vdup_n_s32(-N as _)) @@ -26614,6 +28180,7 @@ pub unsafe fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { static_assert!(N >= 1 && N <= 32); vrshlq_u32(a, vdupq_n_s32(-N as _)) @@ -26629,6 +28196,7 @@ pub unsafe fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { static_assert!(N >= 1 && N <= 64); vrshl_u64(a, vdup_n_s64(-N as _)) @@ -26644,6 +28212,7 @@ pub unsafe fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { static_assert!(N >= 1 && N <= 64); vrshlq_u64(a, vdupq_n_s64(-N as _)) @@ -26657,6 +28226,7 @@ pub unsafe fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); #[allow(improper_ctypes)] @@ -26694,6 +28264,7 @@ vrshrn_n_s16_(a, N) #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); #[allow(improper_ctypes)] @@ -26731,6 +28302,7 @@ vrshrn_n_s32_(a, N) 
#[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vrshrn, N = 2))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub unsafe fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); #[allow(improper_ctypes)] @@ -26770,6 +28342,7 @@ vrshrn_n_s64_(a, N) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rshrn, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); transmute(vrshrn_n_s16::(transmute(a))) @@ -26785,6 +28358,7 @@ pub unsafe fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rshrn, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); transmute(vrshrn_n_s32::(transmute(a))) @@ -26800,6 +28374,7 @@ pub unsafe fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rshrn, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); transmute(vrshrn_n_s64::(transmute(a))) @@ -26815,6 +28390,7 @@ pub unsafe fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] 
#[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); simd_add(a, vrshr_n_s8::(b)) @@ -26830,6 +28406,7 @@ pub unsafe fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert!(N >= 1 && N <= 8); simd_add(a, vrshrq_n_s8::(b)) @@ -26845,6 +28422,7 @@ pub unsafe fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); simd_add(a, vrshr_n_s16::(b)) @@ -26860,6 +28438,7 @@ pub unsafe fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert!(N >= 1 && N <= 16); simd_add(a, vrshrq_n_s16::(b)) @@ -26875,6 +28454,7 @@ pub unsafe fn 
vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_ #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); simd_add(a, vrshr_n_s32::(b)) @@ -26890,6 +28470,7 @@ pub unsafe fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert!(N >= 1 && N <= 32); simd_add(a, vrshrq_n_s32::(b)) @@ -26905,6 +28486,7 @@ pub unsafe fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_ #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { static_assert!(N >= 1 && N <= 64); simd_add(a, vrshr_n_s64::(b)) @@ -26920,6 +28502,7 @@ pub unsafe fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: 
int64x2_t) -> int64x2_t { static_assert!(N >= 1 && N <= 64); simd_add(a, vrshrq_n_s64::(b)) @@ -26935,6 +28518,7 @@ pub unsafe fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_ #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); simd_add(a, vrshr_n_u8::(b)) @@ -26950,6 +28534,7 @@ pub unsafe fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { static_assert!(N >= 1 && N <= 8); simd_add(a, vrshrq_n_u8::(b)) @@ -26965,6 +28550,7 @@ pub unsafe fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x1 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); simd_add(a, vrshr_n_u16::(b)) @@ -26980,6 +28566,7 @@ pub unsafe fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { static_assert!(N >= 1 && N <= 16); simd_add(a, vrshrq_n_u16::(b)) @@ -26995,6 +28582,7 @@ pub unsafe fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); simd_add(a, vrshr_n_u32::(b)) @@ -27010,6 +28598,7 @@ pub unsafe fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { static_assert!(N >= 1 && N <= 32); simd_add(a, vrshrq_n_u32::(b)) @@ -27025,6 +28614,7 @@ pub unsafe fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { static_assert!(N >= 1 && N <= 64); simd_add(a, vrshr_n_u64::(b)) @@ -27040,6 +28630,7 @@ pub unsafe fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(ursra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { static_assert!(N >= 1 && N <= 64); simd_add(a, vrshrq_n_u64::(b)) @@ -27054,6 +28645,7 @@ pub unsafe fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27073,6 +28665,7 @@ vrsubhn_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27092,6 +28685,7 @@ vrsubhn_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27111,6 +28705,7 @@ vrsubhn_s64_(a, b) #[cfg_attr(all(test, target_arch 
= "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { transmute(vrsubhn_s16(transmute(a), transmute(b))) } @@ -27124,6 +28719,7 @@ pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { transmute(vrsubhn_s32(transmute(a), transmute(b))) } @@ -27137,6 +28733,7 @@ pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { transmute(vrsubhn_s64(transmute(a), transmute(b))) } @@ -27151,6 +28748,7 @@ pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { 
static_assert_uimm_bits!(LANE, 3); simd_insert(b, LANE as u32, a) @@ -27166,6 +28764,7 @@ pub unsafe fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t { static_assert_uimm_bits!(LANE, 2); simd_insert(b, LANE as u32, a) @@ -27181,6 +28780,7 @@ pub unsafe fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 1); simd_insert(b, LANE as u32, a) @@ -27196,6 +28796,7 @@ pub unsafe fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t { static_assert!(LANE == 0); simd_insert(b, LANE as u32, a) @@ -27211,6 +28812,7 @@ pub unsafe fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { static_assert_uimm_bits!(LANE, 3); simd_insert(b, LANE as u32, a) @@ -27226,6 +28828,7 @@ pub unsafe fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_t { static_assert_uimm_bits!(LANE, 2); simd_insert(b, LANE as u32, a) @@ -27241,6 +28844,7 @@ pub unsafe fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_ #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_t { static_assert_uimm_bits!(LANE, 1); simd_insert(b, LANE as u32, a) @@ -27256,6 +28860,7 @@ pub unsafe fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_ #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_t { static_assert!(LANE == 0); simd_insert(b, LANE as u32, a) @@ -27271,6 +28876,7 @@ pub unsafe fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_ #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { static_assert_uimm_bits!(LANE, 3); simd_insert(b, LANE as u32, a) @@ -27286,6 +28892,7 @@ pub unsafe fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_t { static_assert_uimm_bits!(LANE, 2); simd_insert(b, LANE as u32, a) @@ -27301,6 +28908,7 @@ pub unsafe fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_ #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vset_lane_p64(a: p64, b: poly64x1_t) -> poly64x1_t { static_assert!(LANE == 0); simd_insert(b, LANE as u32, a) @@ -27316,6 +28924,7 @@ pub unsafe fn vset_lane_p64(a: p64, b: poly64x1_t) -> poly64x1_ #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { static_assert_uimm_bits!(LANE, 4); simd_insert(b, LANE as u32, a) @@ -27331,6 +28940,7 @@ pub unsafe fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(LANE, 3); simd_insert(b, LANE as u32, a) @@ -27346,6 +28956,7 @@ pub unsafe fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); simd_insert(b, LANE as u32, a) @@ -27361,6 +28972,7 @@ pub unsafe fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t { static_assert_uimm_bits!(LANE, 1); simd_insert(b, LANE as u32, a) @@ -27376,6 +28988,7 @@ pub unsafe fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t { static_assert_uimm_bits!(LANE, 4); simd_insert(b, LANE as u32, a) @@ -27391,6 +29004,7 @@ 
pub unsafe fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8_t { static_assert_uimm_bits!(LANE, 3); simd_insert(b, LANE as u32, a) @@ -27406,6 +29020,7 @@ pub unsafe fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 2); simd_insert(b, LANE as u32, a) @@ -27421,6 +29036,7 @@ pub unsafe fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2_t { static_assert_uimm_bits!(LANE, 1); simd_insert(b, LANE as u32, a) @@ -27436,6 +29052,7 @@ pub unsafe fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsetq_lane_p8(a: p8, b: 
poly8x16_t) -> poly8x16_t { static_assert_uimm_bits!(LANE, 4); simd_insert(b, LANE as u32, a) @@ -27451,6 +29068,7 @@ pub unsafe fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8_t { static_assert_uimm_bits!(LANE, 3); simd_insert(b, LANE as u32, a) @@ -27466,6 +29084,7 @@ pub unsafe fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2_t { static_assert_uimm_bits!(LANE, 1); simd_insert(b, LANE as u32, a) @@ -27481,6 +29100,7 @@ pub unsafe fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x2_t { static_assert_uimm_bits!(LANE, 1); simd_insert(b, LANE as u32, a) @@ -27496,6 +29116,7 @@ pub unsafe fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = 
"arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32x4_t { static_assert_uimm_bits!(LANE, 2); simd_insert(b, LANE as u32, a) @@ -27510,6 +29131,7 @@ pub unsafe fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27529,6 +29151,7 @@ vshl_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27548,6 +29171,7 @@ vshlq_s8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27567,6 +29191,7 @@ vshl_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27586,6 +29211,7 @@ vshlq_s16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27605,6 +29231,7 @@ vshl_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27624,6 +29251,7 @@ vshlq_s32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27643,6 +29271,7 @@ vshl_s64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800"))] pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27662,6 +29291,7 @@ vshlq_s64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27681,6 +29311,7 @@ vshl_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27700,6 +29331,7 @@ vshlq_u8_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27719,6 +29351,7 @@ vshl_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> 
uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27738,6 +29371,7 @@ vshlq_u16_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27757,6 +29391,7 @@ vshl_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27776,6 +29411,7 @@ vshlq_u32_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -27795,6 +29431,7 @@ vshl_u64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushl))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" 
{ @@ -27815,6 +29452,7 @@ vshlq_u64_(a, b) #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { static_assert_uimm_bits!(N, 3); simd_shl(a, vdup_n_s8(N as _)) @@ -27830,6 +29468,7 @@ pub unsafe fn vshl_n_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { static_assert_uimm_bits!(N, 3); simd_shl(a, vdupq_n_s8(N as _)) @@ -27845,6 +29484,7 @@ pub unsafe fn vshlq_n_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { static_assert_uimm_bits!(N, 4); simd_shl(a, vdup_n_s16(N as _)) @@ -27860,6 +29500,7 @@ pub unsafe fn vshl_n_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(N, 4); simd_shl(a, vdupq_n_s16(N as _)) @@ -27875,6 +29516,7 @@ pub unsafe fn 
vshlq_n_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { static_assert_uimm_bits!(N, 5); simd_shl(a, vdup_n_s32(N as _)) @@ -27890,6 +29532,7 @@ pub unsafe fn vshl_n_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { static_assert_uimm_bits!(N, 5); simd_shl(a, vdupq_n_s32(N as _)) @@ -27905,6 +29548,7 @@ pub unsafe fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { static_assert_uimm_bits!(N, 3); simd_shl(a, vdup_n_u8(N as _)) @@ -27920,6 +29564,7 @@ pub unsafe fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { static_assert_uimm_bits!(N, 3); simd_shl(a, vdupq_n_u8(N as _)) @@ -27935,6 +29580,7 @@ pub unsafe 
fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { static_assert_uimm_bits!(N, 4); simd_shl(a, vdup_n_u16(N as _)) @@ -27950,6 +29596,7 @@ pub unsafe fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { static_assert_uimm_bits!(N, 4); simd_shl(a, vdupq_n_u16(N as _)) @@ -27965,6 +29612,7 @@ pub unsafe fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { static_assert_uimm_bits!(N, 5); simd_shl(a, vdup_n_u32(N as _)) @@ -27980,6 +29628,7 @@ pub unsafe fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { static_assert_uimm_bits!(N, 5); simd_shl(a, vdupq_n_u32(N as _)) @@ -27995,6 
+29644,7 @@ pub unsafe fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshl_n_s64(a: int64x1_t) -> int64x1_t { static_assert_uimm_bits!(N, 6); simd_shl(a, vdup_n_s64(N as _)) @@ -28010,6 +29660,7 @@ pub unsafe fn vshl_n_s64(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { static_assert_uimm_bits!(N, 6); simd_shl(a, vdupq_n_s64(N as _)) @@ -28025,6 +29676,7 @@ pub unsafe fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshl_n_u64(a: uint64x1_t) -> uint64x1_t { static_assert_uimm_bits!(N, 6); simd_shl(a, vdup_n_u64(N as _)) @@ -28040,6 +29692,7 @@ pub unsafe fn vshl_n_u64(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { static_assert_uimm_bits!(N, 6); simd_shl(a, vdupq_n_u64(N as 
_)) @@ -28055,6 +29708,7 @@ pub unsafe fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshll, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { static_assert!(N >= 0 && N <= 8); simd_shl(simd_cast(a), vdupq_n_s16(N as _)) @@ -28070,6 +29724,7 @@ pub unsafe fn vshll_n_s8(a: int8x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshll, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { static_assert!(N >= 0 && N <= 16); simd_shl(simd_cast(a), vdupq_n_s32(N as _)) @@ -28085,6 +29740,7 @@ pub unsafe fn vshll_n_s16(a: int16x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshll, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { static_assert!(N >= 0 && N <= 32); simd_shl(simd_cast(a), vdupq_n_s64(N as _)) @@ -28100,6 +29756,7 @@ pub unsafe fn vshll_n_s32(a: int32x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushll, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { 
static_assert!(N >= 0 && N <= 8); simd_shl(simd_cast(a), vdupq_n_u16(N as _)) @@ -28115,6 +29772,7 @@ pub unsafe fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushll, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { static_assert!(N >= 0 && N <= 16); simd_shl(simd_cast(a), vdupq_n_u32(N as _)) @@ -28130,6 +29788,7 @@ pub unsafe fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushll, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { static_assert!(N >= 0 && N <= 32); simd_shl(simd_cast(a), vdupq_n_u64(N as _)) @@ -28145,6 +29804,7 @@ pub unsafe fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); let n: i32 = if N == 8 { 7 } else { N }; @@ -28161,6 +29821,7 @@ pub unsafe fn vshr_n_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue 
= "111800"))] pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { static_assert!(N >= 1 && N <= 8); let n: i32 = if N == 8 { 7 } else { N }; @@ -28177,6 +29838,7 @@ pub unsafe fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); let n: i32 = if N == 16 { 15 } else { N }; @@ -28193,6 +29855,7 @@ pub unsafe fn vshr_n_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { static_assert!(N >= 1 && N <= 16); let n: i32 = if N == 16 { 15 } else { N }; @@ -28209,6 +29872,7 @@ pub unsafe fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); let n: i32 = if N == 32 { 31 } else { N }; @@ -28225,6 +29889,7 @@ pub unsafe fn vshr_n_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = 
"arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { static_assert!(N >= 1 && N <= 32); let n: i32 = if N == 32 { 31 } else { N }; @@ -28241,6 +29906,7 @@ pub unsafe fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshr_n_s64(a: int64x1_t) -> int64x1_t { static_assert!(N >= 1 && N <= 64); let n: i32 = if N == 64 { 63 } else { N }; @@ -28257,6 +29923,7 @@ pub unsafe fn vshr_n_s64(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { static_assert!(N >= 1 && N <= 64); let n: i32 = if N == 64 { 63 } else { N }; @@ -28273,6 +29940,7 @@ pub unsafe fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); let n: i32 = if N == 8 { return vdup_n_u8(0); } else { N }; @@ -28289,6 +29957,7 @@ pub unsafe fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { static_assert!(N >= 1 && N <= 8); let n: i32 = if N == 8 { return vdupq_n_u8(0); } else { N }; @@ -28305,6 +29974,7 @@ pub unsafe fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); let n: i32 = if N == 16 { return vdup_n_u16(0); } else { N }; @@ -28321,6 +29991,7 @@ pub unsafe fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { static_assert!(N >= 1 && N <= 16); let n: i32 = if N == 16 { return vdupq_n_u16(0); } else { N }; @@ -28337,6 +30008,7 @@ pub unsafe fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); let n: i32 = if N == 32 { return vdup_n_u32(0); } else { N }; @@ -28353,6 +30025,7 @@ pub unsafe fn vshr_n_u32(a: uint32x2_t) -> 
uint32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { static_assert!(N >= 1 && N <= 32); let n: i32 = if N == 32 { return vdupq_n_u32(0); } else { N }; @@ -28369,6 +30042,7 @@ pub unsafe fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t { static_assert!(N >= 1 && N <= 64); let n: i32 = if N == 64 { return vdup_n_u64(0); } else { N }; @@ -28385,6 +30059,7 @@ pub unsafe fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { static_assert!(N >= 1 && N <= 64); let n: i32 = if N == 64 { return vdupq_n_u64(0); } else { N }; @@ -28401,6 +30076,7 @@ pub unsafe fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { 
static_assert!(N >= 1 && N <= 8); simd_cast(simd_shr(a, vdupq_n_s16(N as _))) @@ -28416,6 +30092,7 @@ pub unsafe fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); simd_cast(simd_shr(a, vdupq_n_s32(N as _))) @@ -28431,6 +30108,7 @@ pub unsafe fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); simd_cast(simd_shr(a, vdupq_n_s64(N as _))) @@ -28446,6 +30124,7 @@ pub unsafe fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); simd_cast(simd_shr(a, vdupq_n_u16(N as _))) @@ -28461,6 +30140,7 @@ pub unsafe fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800"))] pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); simd_cast(simd_shr(a, vdupq_n_u32(N as _))) @@ -28476,6 +30156,7 @@ pub unsafe fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); simd_cast(simd_shr(a, vdupq_n_u64(N as _))) @@ -28491,6 +30172,7 @@ pub unsafe fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert!(N >= 1 && N <= 8); simd_add(a, vshr_n_s8::(b)) @@ -28506,6 +30188,7 @@ pub unsafe fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert!(N >= 1 && N <= 8); simd_add(a, vshrq_n_s8::(b)) @@ -28521,6 +30204,7 @@ pub unsafe fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert!(N >= 1 && N <= 16); simd_add(a, vshr_n_s16::(b)) @@ -28536,6 +30220,7 @@ pub unsafe fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert!(N >= 1 && N <= 16); simd_add(a, vshrq_n_s16::(b)) @@ -28551,6 +30236,7 @@ pub unsafe fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert!(N >= 1 && N <= 32); simd_add(a, vshr_n_s32::(b)) @@ -28566,6 +30252,7 @@ pub unsafe fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert!(N >= 1 && N <= 32); simd_add(a, vshrq_n_s32::(b)) @@ -28581,6 +30268,7 @@ pub unsafe fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] 
#[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { static_assert!(N >= 1 && N <= 64); simd_add(a, vshr_n_s64::(b)) @@ -28596,6 +30284,7 @@ pub unsafe fn vsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { static_assert!(N >= 1 && N <= 64); simd_add(a, vshrq_n_s64::(b)) @@ -28611,6 +30300,7 @@ pub unsafe fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { static_assert!(N >= 1 && N <= 8); simd_add(a, vshr_n_u8::(b)) @@ -28626,6 +30316,7 @@ pub unsafe fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { static_assert!(N >= 1 && N <= 8); simd_add(a, vshrq_n_u8::(b)) @@ -28641,6 +30332,7 @@ pub unsafe fn vsraq_n_u8(a: 
uint8x16_t, b: uint8x16_t) -> uint8x16 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert!(N >= 1 && N <= 16); simd_add(a, vshr_n_u16::(b)) @@ -28656,6 +30348,7 @@ pub unsafe fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { static_assert!(N >= 1 && N <= 16); simd_add(a, vshrq_n_u16::(b)) @@ -28671,6 +30364,7 @@ pub unsafe fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { static_assert!(N >= 1 && N <= 32); simd_add(a, vshr_n_u32::(b)) @@ -28686,6 +30380,7 @@ pub unsafe fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> 
uint32x4_t { static_assert!(N >= 1 && N <= 32); simd_add(a, vshrq_n_u32::(b)) @@ -28701,6 +30396,7 @@ pub unsafe fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { static_assert!(N >= 1 && N <= 64); simd_add(a, vshr_n_u64::(b)) @@ -28716,6 +30412,7 @@ pub unsafe fn vsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))] #[rustc_legacy_const_generics(2)] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { static_assert!(N >= 1 && N <= 64); simd_add(a, vshrq_n_u64::(b)) @@ -28730,6 +30427,7 @@ pub unsafe fn vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { let a1: int8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: int8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -28745,6 +30443,7 @@ pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { let a1: int16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); let b1: int16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); @@ -28760,6 +30459,7 @@ pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { let a1: int8x16_t = simd_shuffle!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]); let b1: int8x16_t = simd_shuffle!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]); @@ -28775,6 +30475,7 @@ pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { let a1: int16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: int16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -28790,6 +30491,7 @@ pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { let a1: int32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); let b1: int32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); @@ -28805,6 +30507,7 @@ pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { let a1: uint8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: uint8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -28820,6 +30523,7 @@ pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { let a1: uint16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); let b1: uint16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); @@ -28835,6 +30539,7 @@ pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> 
uint8x16x2_t { let a1: uint8x16_t = simd_shuffle!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]); let b1: uint8x16_t = simd_shuffle!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]); @@ -28850,6 +30555,7 @@ pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { let a1: uint16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: uint16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -28865,6 +30571,7 @@ pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { let a1: uint32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); let b1: uint32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); @@ -28880,6 +30587,7 @@ pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { let a1: poly8x8_t = simd_shuffle!(a, b, [0, 
8, 2, 10, 4, 12, 6, 14]); let b1: poly8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -28895,6 +30603,7 @@ pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { let a1: poly16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); let b1: poly16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); @@ -28910,6 +30619,7 @@ pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { let a1: poly8x16_t = simd_shuffle!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]); let b1: poly8x16_t = simd_shuffle!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]); @@ -28925,6 +30635,7 @@ pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { let a1: poly16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: poly16x8_t = simd_shuffle!(a, 
b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -28940,6 +30651,7 @@ pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { let a1: int32x2_t = simd_shuffle!(a, b, [0, 2]); let b1: int32x2_t = simd_shuffle!(a, b, [1, 3]); @@ -28955,6 +30667,7 @@ pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { let a1: uint32x2_t = simd_shuffle!(a, b, [0, 2]); let b1: uint32x2_t = simd_shuffle!(a, b, [1, 3]); @@ -28970,6 +30683,7 @@ pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { let a1: float32x2_t = simd_shuffle!(a, b, [0, 2]); let b1: float32x2_t = simd_shuffle!(a, b, [1, 3]); @@ -28985,6 +30699,7 @@ pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(trn))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { let a1: float32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]); let b1: float32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]); @@ -29000,6 +30715,7 @@ pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { let a0: int8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: int8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -29015,6 +30731,7 @@ pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { let a0: int16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); let b0: int16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); @@ -29030,6 +30747,7 @@ pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = 
"arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { let a0: uint8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: uint8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -29045,6 +30763,7 @@ pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { let a0: uint16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); let b0: uint16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); @@ -29060,6 +30779,7 @@ pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { let a0: poly8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: poly8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -29075,6 +30795,7 @@ pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> 
poly16x4x2_t { let a0: poly16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); let b0: poly16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); @@ -29090,6 +30811,7 @@ pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); @@ -29105,6 +30827,7 @@ pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); @@ -29120,6 +30843,7 @@ pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { let a0: int8x16_t = simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]); let b0: int8x16_t = simd_shuffle!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]); 
@@ -29135,6 +30859,7 @@ pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { let a0: int16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: int16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -29150,6 +30875,7 @@ pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { let a0: int32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); let b0: int32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); @@ -29165,6 +30891,7 @@ pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { let a0: uint8x16_t = simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]); let b0: uint8x16_t = simd_shuffle!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]); @@ -29180,6 +30907,7 @@ pub unsafe fn vzipq_u8(a: uint8x16_t, b: 
uint8x16_t) -> uint8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { let a0: uint16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: uint16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -29195,6 +30923,7 @@ pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { let a0: uint32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); let b0: uint32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); @@ -29210,6 +30939,7 @@ pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { let a0: poly8x16_t = simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]); let b0: poly8x16_t = simd_shuffle!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]); @@ -29225,6 +30955,7 @@ pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { let a0: poly16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: poly16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -29240,6 +30971,7 @@ pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); @@ -29255,6 +30987,7 @@ pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { let a0: float32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]); let b0: float32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]); @@ -29270,6 +31003,7 @@ pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] #[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { let a0: int8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: int8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29285,6 +31019,7 @@ pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { let a0: int16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); let b0: int16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); @@ -29300,6 +31035,7 @@ pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { let a0: int8x16_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]); let b0: int8x16_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]); @@ -29315,6 +31051,7 @@ pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { let a0: int16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: int16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29330,6 +31067,7 @@ pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { let a0: int32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); let b0: int32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); @@ -29345,6 +31083,7 @@ pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { let a0: uint8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: uint8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29360,6 +31099,7 @@ pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { let 
a0: uint16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); let b0: uint16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); @@ -29375,6 +31115,7 @@ pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { let a0: uint8x16_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]); let b0: uint8x16_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]); @@ -29390,6 +31131,7 @@ pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { let a0: uint16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: uint16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29405,6 +31147,7 @@ pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { let a0: uint32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); let 
b0: uint32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); @@ -29420,6 +31163,7 @@ pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { let a0: poly8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: poly8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29435,6 +31179,7 @@ pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { let a0: poly16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); let b0: poly16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); @@ -29450,6 +31195,7 @@ pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { let a0: poly8x16_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]); let b0: poly8x16_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]); @@ -29465,6 
+31211,7 @@ pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { let a0: poly16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: poly16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29480,6 +31227,7 @@ pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]); let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]); @@ -29495,6 +31243,7 @@ pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]); let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]); @@ -29510,6 +31259,7 @@ pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = 
"aarch64"), assert_instr(zip))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]); let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]); @@ -29525,6 +31275,7 @@ pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { let a0: float32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]); let b0: float32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]); @@ -29540,6 +31291,7 @@ pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { let d: uint8x8_t = vabd_u8(b, c); simd_add(a, simd_cast(d)) @@ -29554,6 +31306,7 @@ pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { let d: uint16x4_t = vabd_u16(b, c); simd_add(a, simd_cast(d)) @@ -29568,6 +31321,7 @@ pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { let d: uint32x2_t = vabd_u32(b, c); simd_add(a, simd_cast(d)) @@ -29582,6 +31336,7 @@ pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { let d: int8x8_t = vabd_s8(b, c); let e: uint8x8_t = simd_cast(d); @@ -29597,6 +31352,7 @@ pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { let d: int16x4_t = vabd_s16(b, c); let e: uint16x4_t = simd_cast(d); @@ -29612,6 +31368,7 @@ pub unsafe fn 
vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabal))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { let d: int32x2_t = vabd_s32(b, c); let e: uint32x2_t = simd_cast(d); @@ -29627,6 +31384,7 @@ pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -29646,6 +31404,7 @@ vqabs_s8_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -29665,6 +31424,7 @@ vqabsq_s8_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { 
#[allow(improper_ctypes)] extern "unadjusted" { @@ -29684,6 +31444,7 @@ vqabs_s16_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -29703,6 +31464,7 @@ vqabsq_s16_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -29722,6 +31484,7 @@ vqabs_s32_(a) #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { diff --git a/crates/core_arch/src/arm_shared/neon/mod.rs b/crates/core_arch/src/arm_shared/neon/mod.rs index 4e086543fb..b95837c44e 100644 --- a/crates/core_arch/src/arm_shared/neon/mod.rs +++ b/crates/core_arch/src/arm_shared/neon/mod.rs @@ -3,6 +3,8 @@ #[rustfmt::skip] mod generated; #[rustfmt::skip] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub use 
self::generated::*; use crate::{ @@ -19,89 +21,113 @@ pub(crate) type p128 = u128; types! { /// ARM-specific 64-bit wide vector of eight packed `i8`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct int8x8_t(pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8); /// ARM-specific 64-bit wide vector of eight packed `u8`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct uint8x8_t(pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8); /// ARM-specific 64-bit wide polynomial vector of eight packed `p8`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct poly8x8_t(pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8); /// ARM-specific 64-bit wide vector of four packed `i16`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct int16x4_t(pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16); /// ARM-specific 64-bit wide vector of four packed `u16`. 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct uint16x4_t(pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16); // FIXME: ARM-specific 64-bit wide vector of four packed `f16`. // pub struct float16x4_t(f16, f16, f16, f16); /// ARM-specific 64-bit wide vector of four packed `p16`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct poly16x4_t(pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16); /// ARM-specific 64-bit wide vector of two packed `i32`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct int32x2_t(pub(crate) i32, pub(crate) i32); /// ARM-specific 64-bit wide vector of two packed `u32`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct uint32x2_t(pub(crate) u32, pub(crate) u32); /// ARM-specific 64-bit wide vector of two packed `f32`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct float32x2_t(pub(crate) f32, pub(crate) f32); /// ARM-specific 64-bit wide vector of one packed `i64`. 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct int64x1_t(pub(crate) i64); /// ARM-specific 64-bit wide vector of one packed `u64`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct uint64x1_t(pub(crate) u64); /// ARM-specific 64-bit wide vector of one packed `p64`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct poly64x1_t(pub(crate) p64); /// ARM-specific 128-bit wide vector of sixteen packed `i8`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct int8x16_t( pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8 , pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8 , pub(crate) i8, pub(crate) i8, ); /// ARM-specific 128-bit wide vector of sixteen packed `u8`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct uint8x16_t( pub(crate) u8, pub(crate) u8 , pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8 , pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8 , pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8 , pub(crate) u8, pub(crate) u8, ); /// ARM-specific 128-bit wide vector of sixteen packed `p8`. 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct poly8x16_t( pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, ); /// ARM-specific 128-bit wide vector of eight packed `i16`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct int16x8_t(pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16); /// ARM-specific 128-bit wide vector of eight packed `u16`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct uint16x8_t(pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16); // FIXME: ARM-specific 128-bit wide vector of eight packed `f16`. // pub struct float16x8_t(f16, f16, f16, f16, f16, f16, f16); /// ARM-specific 128-bit wide vector of eight packed `p16`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct poly16x8_t(pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16); /// ARM-specific 128-bit wide vector of four packed `i32`. 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct int32x4_t(pub(crate) i32, pub(crate) i32, pub(crate) i32, pub(crate) i32); /// ARM-specific 128-bit wide vector of four packed `u32`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct uint32x4_t(pub(crate) u32, pub(crate) u32, pub(crate) u32, pub(crate) u32); /// ARM-specific 128-bit wide vector of four packed `f32`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct float32x4_t(pub(crate) f32, pub(crate) f32, pub(crate) f32, pub(crate) f32); /// ARM-specific 128-bit wide vector of two packed `i64`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct int64x2_t(pub(crate) i64, pub(crate) i64); /// ARM-specific 128-bit wide vector of two packed `u64`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct uint64x2_t(pub(crate) u64, pub(crate) u64); /// ARM-specific 128-bit wide vector of two packed `p64`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(target_arch = "arm", unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800"))] pub struct poly64x2_t(pub(crate) p64, pub(crate) p64); } @@ -112,6 +138,10 @@ types! 
{ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int8x8x2_t(pub int8x8_t, pub int8x8_t); /// ARM-specific type containing three `int8x8_t` vectors. #[repr(C)] @@ -120,6 +150,10 @@ pub struct int8x8x2_t(pub int8x8_t, pub int8x8_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int8x8x3_t(pub int8x8_t, pub int8x8_t, pub int8x8_t); /// ARM-specific type containing four `int8x8_t` vectors. #[repr(C)] @@ -128,6 +162,10 @@ pub struct int8x8x3_t(pub int8x8_t, pub int8x8_t, pub int8x8_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int8x8x4_t(pub int8x8_t, pub int8x8_t, pub int8x8_t, pub int8x8_t); /// ARM-specific type containing two `int8x16_t` vectors. @@ -137,6 +175,10 @@ pub struct int8x8x4_t(pub int8x8_t, pub int8x8_t, pub int8x8_t, pub int8x8_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int8x16x2_t(pub int8x16_t, pub int8x16_t); /// ARM-specific type containing three `int8x16_t` vectors. #[repr(C)] @@ -145,6 +187,10 @@ pub struct int8x16x2_t(pub int8x16_t, pub int8x16_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int8x16x3_t(pub int8x16_t, pub int8x16_t, pub int8x16_t); /// ARM-specific type containing four `int8x16_t` vectors. 
#[repr(C)] @@ -153,6 +199,10 @@ pub struct int8x16x3_t(pub int8x16_t, pub int8x16_t, pub int8x16_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int8x16x4_t(pub int8x16_t, pub int8x16_t, pub int8x16_t, pub int8x16_t); /// ARM-specific type containing two `uint8x8_t` vectors. @@ -162,6 +212,10 @@ pub struct int8x16x4_t(pub int8x16_t, pub int8x16_t, pub int8x16_t, pub int8x16_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint8x8x2_t(pub uint8x8_t, pub uint8x8_t); /// ARM-specific type containing three `uint8x8_t` vectors. #[repr(C)] @@ -170,6 +224,10 @@ pub struct uint8x8x2_t(pub uint8x8_t, pub uint8x8_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint8x8x3_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t); /// ARM-specific type containing four `uint8x8_t` vectors. #[repr(C)] @@ -178,6 +236,10 @@ pub struct uint8x8x3_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint8x8x4_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t, pub uint8x8_t); /// ARM-specific type containing two `uint8x16_t` vectors. 
@@ -187,6 +249,10 @@ pub struct uint8x8x4_t(pub uint8x8_t, pub uint8x8_t, pub uint8x8_t, pub uint8x8_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint8x16x2_t(pub uint8x16_t, pub uint8x16_t); /// ARM-specific type containing three `uint8x16_t` vectors. #[repr(C)] @@ -195,6 +261,10 @@ pub struct uint8x16x2_t(pub uint8x16_t, pub uint8x16_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint8x16x3_t(pub uint8x16_t, pub uint8x16_t, pub uint8x16_t); /// ARM-specific type containing four `uint8x16_t` vectors. #[repr(C)] @@ -203,6 +273,10 @@ pub struct uint8x16x3_t(pub uint8x16_t, pub uint8x16_t, pub uint8x16_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint8x16x4_t( pub uint8x16_t, pub uint8x16_t, @@ -217,6 +291,10 @@ pub struct uint8x16x4_t( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly8x8x2_t(pub poly8x8_t, pub poly8x8_t); /// ARM-specific type containing three `poly8x8_t` vectors. #[repr(C)] @@ -225,6 +303,10 @@ pub struct poly8x8x2_t(pub poly8x8_t, pub poly8x8_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly8x8x3_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t); /// ARM-specific type containing four `poly8x8_t` vectors. 
#[repr(C)] @@ -233,6 +315,10 @@ pub struct poly8x8x3_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly8x8x4_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t, pub poly8x8_t); /// ARM-specific type containing two `poly8x16_t` vectors. @@ -242,6 +328,10 @@ pub struct poly8x8x4_t(pub poly8x8_t, pub poly8x8_t, pub poly8x8_t, pub poly8x8_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly8x16x2_t(pub poly8x16_t, pub poly8x16_t); /// ARM-specific type containing three `poly8x16_t` vectors. #[repr(C)] @@ -250,6 +340,10 @@ pub struct poly8x16x2_t(pub poly8x16_t, pub poly8x16_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly8x16x3_t(pub poly8x16_t, pub poly8x16_t, pub poly8x16_t); /// ARM-specific type containing four `poly8x16_t` vectors. #[repr(C)] @@ -258,6 +352,10 @@ pub struct poly8x16x3_t(pub poly8x16_t, pub poly8x16_t, pub poly8x16_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly8x16x4_t( pub poly8x16_t, pub poly8x16_t, @@ -272,6 +370,10 @@ pub struct poly8x16x4_t( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int16x4x2_t(pub int16x4_t, pub int16x4_t); /// ARM-specific type containing three `int16x4_t` vectors. 
#[repr(C)] @@ -280,6 +382,10 @@ pub struct int16x4x2_t(pub int16x4_t, pub int16x4_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int16x4x3_t(pub int16x4_t, pub int16x4_t, pub int16x4_t); /// ARM-specific type containing four `int16x4_t` vectors. #[repr(C)] @@ -288,6 +394,10 @@ pub struct int16x4x3_t(pub int16x4_t, pub int16x4_t, pub int16x4_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int16x4x4_t(pub int16x4_t, pub int16x4_t, pub int16x4_t, pub int16x4_t); /// ARM-specific type containing two `int16x8_t` vectors. @@ -297,6 +407,10 @@ pub struct int16x4x4_t(pub int16x4_t, pub int16x4_t, pub int16x4_t, pub int16x4_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int16x8x2_t(pub int16x8_t, pub int16x8_t); /// ARM-specific type containing three `int16x8_t` vectors. #[repr(C)] @@ -305,6 +419,10 @@ pub struct int16x8x2_t(pub int16x8_t, pub int16x8_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int16x8x3_t(pub int16x8_t, pub int16x8_t, pub int16x8_t); /// ARM-specific type containing four `int16x8_t` vectors. 
#[repr(C)] @@ -313,6 +431,10 @@ pub struct int16x8x3_t(pub int16x8_t, pub int16x8_t, pub int16x8_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int16x8x4_t(pub int16x8_t, pub int16x8_t, pub int16x8_t, pub int16x8_t); /// ARM-specific type containing two `uint16x4_t` vectors. @@ -322,6 +444,10 @@ pub struct int16x8x4_t(pub int16x8_t, pub int16x8_t, pub int16x8_t, pub int16x8_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint16x4x2_t(pub uint16x4_t, pub uint16x4_t); /// ARM-specific type containing three `uint16x4_t` vectors. #[repr(C)] @@ -330,6 +456,10 @@ pub struct uint16x4x2_t(pub uint16x4_t, pub uint16x4_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint16x4x3_t(pub uint16x4_t, pub uint16x4_t, pub uint16x4_t); /// ARM-specific type containing four `uint16x4_t` vectors. #[repr(C)] @@ -338,6 +468,10 @@ pub struct uint16x4x3_t(pub uint16x4_t, pub uint16x4_t, pub uint16x4_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint16x4x4_t( pub uint16x4_t, pub uint16x4_t, @@ -352,6 +486,10 @@ pub struct uint16x4x4_t( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint16x8x2_t(pub uint16x8_t, pub uint16x8_t); /// ARM-specific type containing three `uint16x8_t` vectors. 
#[repr(C)] @@ -360,6 +498,10 @@ pub struct uint16x8x2_t(pub uint16x8_t, pub uint16x8_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint16x8x3_t(pub uint16x8_t, pub uint16x8_t, pub uint16x8_t); /// ARM-specific type containing four `uint16x8_t` vectors. #[repr(C)] @@ -368,6 +510,10 @@ pub struct uint16x8x3_t(pub uint16x8_t, pub uint16x8_t, pub uint16x8_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint16x8x4_t( pub uint16x8_t, pub uint16x8_t, @@ -382,6 +528,10 @@ pub struct uint16x8x4_t( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly16x4x2_t(pub poly16x4_t, pub poly16x4_t); /// ARM-specific type containing three `poly16x4_t` vectors. #[repr(C)] @@ -390,6 +540,10 @@ pub struct poly16x4x2_t(pub poly16x4_t, pub poly16x4_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly16x4x3_t(pub poly16x4_t, pub poly16x4_t, pub poly16x4_t); /// ARM-specific type containing four `poly16x4_t` vectors. 
#[repr(C)] @@ -398,6 +552,10 @@ pub struct poly16x4x3_t(pub poly16x4_t, pub poly16x4_t, pub poly16x4_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly16x4x4_t( pub poly16x4_t, pub poly16x4_t, @@ -412,6 +570,10 @@ pub struct poly16x4x4_t( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly16x8x2_t(pub poly16x8_t, pub poly16x8_t); /// ARM-specific type containing three `poly16x8_t` vectors. #[repr(C)] @@ -420,6 +582,10 @@ pub struct poly16x8x2_t(pub poly16x8_t, pub poly16x8_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly16x8x3_t(pub poly16x8_t, pub poly16x8_t, pub poly16x8_t); /// ARM-specific type containing four `poly16x8_t` vectors. #[repr(C)] @@ -428,6 +594,10 @@ pub struct poly16x8x3_t(pub poly16x8_t, pub poly16x8_t, pub poly16x8_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly16x8x4_t( pub poly16x8_t, pub poly16x8_t, @@ -442,6 +612,10 @@ pub struct poly16x8x4_t( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int32x2x2_t(pub int32x2_t, pub int32x2_t); /// ARM-specific type containing three `int32x2_t` vectors. 
#[repr(C)] @@ -450,6 +624,10 @@ pub struct int32x2x2_t(pub int32x2_t, pub int32x2_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int32x2x3_t(pub int32x2_t, pub int32x2_t, pub int32x2_t); /// ARM-specific type containing four `int32x2_t` vectors. #[repr(C)] @@ -458,6 +636,10 @@ pub struct int32x2x3_t(pub int32x2_t, pub int32x2_t, pub int32x2_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int32x2x4_t(pub int32x2_t, pub int32x2_t, pub int32x2_t, pub int32x2_t); /// ARM-specific type containing two `int32x4_t` vectors. @@ -467,6 +649,10 @@ pub struct int32x2x4_t(pub int32x2_t, pub int32x2_t, pub int32x2_t, pub int32x2_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int32x4x2_t(pub int32x4_t, pub int32x4_t); /// ARM-specific type containing three `int32x4_t` vectors. #[repr(C)] @@ -475,6 +661,10 @@ pub struct int32x4x2_t(pub int32x4_t, pub int32x4_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int32x4x3_t(pub int32x4_t, pub int32x4_t, pub int32x4_t); /// ARM-specific type containing four `int32x4_t` vectors. 
#[repr(C)] @@ -483,6 +673,10 @@ pub struct int32x4x3_t(pub int32x4_t, pub int32x4_t, pub int32x4_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int32x4x4_t(pub int32x4_t, pub int32x4_t, pub int32x4_t, pub int32x4_t); /// ARM-specific type containing two `uint32x2_t` vectors. @@ -492,6 +686,10 @@ pub struct int32x4x4_t(pub int32x4_t, pub int32x4_t, pub int32x4_t, pub int32x4_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint32x2x2_t(pub uint32x2_t, pub uint32x2_t); /// ARM-specific type containing three `uint32x2_t` vectors. #[repr(C)] @@ -500,6 +698,10 @@ pub struct uint32x2x2_t(pub uint32x2_t, pub uint32x2_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint32x2x3_t(pub uint32x2_t, pub uint32x2_t, pub uint32x2_t); /// ARM-specific type containing four `uint32x2_t` vectors. #[repr(C)] @@ -508,6 +710,10 @@ pub struct uint32x2x3_t(pub uint32x2_t, pub uint32x2_t, pub uint32x2_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint32x2x4_t( pub uint32x2_t, pub uint32x2_t, @@ -522,6 +728,10 @@ pub struct uint32x2x4_t( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint32x4x2_t(pub uint32x4_t, pub uint32x4_t); /// ARM-specific type containing three `uint32x4_t` vectors. 
#[repr(C)] @@ -530,6 +740,10 @@ pub struct uint32x4x2_t(pub uint32x4_t, pub uint32x4_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint32x4x3_t(pub uint32x4_t, pub uint32x4_t, pub uint32x4_t); /// ARM-specific type containing four `uint32x4_t` vectors. #[repr(C)] @@ -538,6 +752,10 @@ pub struct uint32x4x3_t(pub uint32x4_t, pub uint32x4_t, pub uint32x4_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint32x4x4_t( pub uint32x4_t, pub uint32x4_t, @@ -552,6 +770,10 @@ pub struct uint32x4x4_t( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct float32x2x2_t(pub float32x2_t, pub float32x2_t); /// ARM-specific type containing three `float32x2_t` vectors. #[repr(C)] @@ -560,6 +782,10 @@ pub struct float32x2x2_t(pub float32x2_t, pub float32x2_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct float32x2x3_t(pub float32x2_t, pub float32x2_t, pub float32x2_t); /// ARM-specific type containing four `float32x2_t` vectors. 
#[repr(C)] @@ -568,6 +794,10 @@ pub struct float32x2x3_t(pub float32x2_t, pub float32x2_t, pub float32x2_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct float32x2x4_t( pub float32x2_t, pub float32x2_t, @@ -582,6 +812,10 @@ pub struct float32x2x4_t( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct float32x4x2_t(pub float32x4_t, pub float32x4_t); /// ARM-specific type containing three `float32x4_t` vectors. #[repr(C)] @@ -590,6 +824,10 @@ pub struct float32x4x2_t(pub float32x4_t, pub float32x4_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct float32x4x3_t(pub float32x4_t, pub float32x4_t, pub float32x4_t); /// ARM-specific type containing four `float32x4_t` vectors. #[repr(C)] @@ -598,6 +836,10 @@ pub struct float32x4x3_t(pub float32x4_t, pub float32x4_t, pub float32x4_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct float32x4x4_t( pub float32x4_t, pub float32x4_t, @@ -612,6 +854,10 @@ pub struct float32x4x4_t( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int64x1x2_t(pub int64x1_t, pub int64x1_t); /// ARM-specific type containing four `int64x1_t` vectors. 
#[repr(C)] @@ -620,6 +866,10 @@ pub struct int64x1x2_t(pub int64x1_t, pub int64x1_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int64x1x3_t(pub int64x1_t, pub int64x1_t, pub int64x1_t); /// ARM-specific type containing four `int64x1_t` vectors. #[repr(C)] @@ -628,6 +878,10 @@ pub struct int64x1x3_t(pub int64x1_t, pub int64x1_t, pub int64x1_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int64x1x4_t(pub int64x1_t, pub int64x1_t, pub int64x1_t, pub int64x1_t); /// ARM-specific type containing four `int64x2_t` vectors. @@ -637,6 +891,10 @@ pub struct int64x1x4_t(pub int64x1_t, pub int64x1_t, pub int64x1_t, pub int64x1_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int64x2x2_t(pub int64x2_t, pub int64x2_t); /// ARM-specific type containing four `int64x2_t` vectors. #[repr(C)] @@ -645,6 +903,10 @@ pub struct int64x2x2_t(pub int64x2_t, pub int64x2_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int64x2x3_t(pub int64x2_t, pub int64x2_t, pub int64x2_t); /// ARM-specific type containing four `int64x2_t` vectors. 
#[repr(C)] @@ -653,6 +915,10 @@ pub struct int64x2x3_t(pub int64x2_t, pub int64x2_t, pub int64x2_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct int64x2x4_t(pub int64x2_t, pub int64x2_t, pub int64x2_t, pub int64x2_t); /// ARM-specific type containing four `uint64x1_t` vectors. @@ -662,6 +928,10 @@ pub struct int64x2x4_t(pub int64x2_t, pub int64x2_t, pub int64x2_t, pub int64x2_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint64x1x2_t(pub uint64x1_t, pub uint64x1_t); /// ARM-specific type containing four `uint64x1_t` vectors. #[repr(C)] @@ -670,6 +940,10 @@ pub struct uint64x1x2_t(pub uint64x1_t, pub uint64x1_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint64x1x3_t(pub uint64x1_t, pub uint64x1_t, pub uint64x1_t); /// ARM-specific type containing four `uint64x1_t` vectors. #[repr(C)] @@ -678,6 +952,10 @@ pub struct uint64x1x3_t(pub uint64x1_t, pub uint64x1_t, pub uint64x1_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint64x1x4_t( pub uint64x1_t, pub uint64x1_t, @@ -692,6 +970,10 @@ pub struct uint64x1x4_t( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint64x2x2_t(pub uint64x2_t, pub uint64x2_t); /// ARM-specific type containing four `uint64x2_t` vectors. 
#[repr(C)] @@ -700,6 +982,10 @@ pub struct uint64x2x2_t(pub uint64x2_t, pub uint64x2_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint64x2x3_t(pub uint64x2_t, pub uint64x2_t, pub uint64x2_t); /// ARM-specific type containing four `uint64x2_t` vectors. #[repr(C)] @@ -708,6 +994,10 @@ pub struct uint64x2x3_t(pub uint64x2_t, pub uint64x2_t, pub uint64x2_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct uint64x2x4_t( pub uint64x2_t, pub uint64x2_t, @@ -722,6 +1012,10 @@ pub struct uint64x2x4_t( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly64x1x2_t(pub poly64x1_t, pub poly64x1_t); /// ARM-specific type containing four `poly64x1_t` vectors. #[repr(C)] @@ -730,6 +1024,10 @@ pub struct poly64x1x2_t(pub poly64x1_t, pub poly64x1_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly64x1x3_t(pub poly64x1_t, pub poly64x1_t, pub poly64x1_t); /// ARM-specific type containing four `poly64x1_t` vectors. 
#[repr(C)] @@ -738,6 +1036,10 @@ pub struct poly64x1x3_t(pub poly64x1_t, pub poly64x1_t, pub poly64x1_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly64x1x4_t( pub poly64x1_t, pub poly64x1_t, @@ -752,6 +1054,10 @@ pub struct poly64x1x4_t( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly64x2x2_t(pub poly64x2_t, pub poly64x2_t); /// ARM-specific type containing four `poly64x2_t` vectors. #[repr(C)] @@ -760,6 +1066,10 @@ pub struct poly64x2x2_t(pub poly64x2_t, pub poly64x2_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly64x2x3_t(pub poly64x2_t, pub poly64x2_t, pub poly64x2_t); /// ARM-specific type containing four `poly64x2_t` vectors. 
#[repr(C)] @@ -768,6 +1078,10 @@ pub struct poly64x2x3_t(pub poly64x2_t, pub poly64x2_t, pub poly64x2_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub struct poly64x2x4_t( pub poly64x2_t, pub poly64x2_t, @@ -974,6 +1288,10 @@ extern "unadjusted" { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_lane_s8(ptr: *const i8, src: int8x8_t) -> int8x8_t { static_assert_uimm_bits!(LANE, 3); simd_insert(src, LANE as u32, *ptr) @@ -990,6 +1308,10 @@ pub unsafe fn vld1_lane_s8(ptr: *const i8, src: int8x8_t) -> in not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_lane_s8(ptr: *const i8, src: int8x16_t) -> int8x16_t { static_assert_uimm_bits!(LANE, 4); simd_insert(src, LANE as u32, *ptr) @@ -1006,6 +1328,10 @@ pub unsafe fn vld1q_lane_s8(ptr: *const i8, src: int8x16_t) -> not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_lane_s16(ptr: *const i16, src: int16x4_t) -> int16x4_t { static_assert_uimm_bits!(LANE, 2); simd_insert(src, LANE as u32, *ptr) @@ -1022,6 +1348,10 @@ pub unsafe fn vld1_lane_s16(ptr: *const i16, src: int16x4_t) -> not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_lane_s16(ptr: *const i16, src: int16x8_t) -> int16x8_t { static_assert_uimm_bits!(LANE, 3); simd_insert(src, 
LANE as u32, *ptr) @@ -1038,6 +1368,10 @@ pub unsafe fn vld1q_lane_s16(ptr: *const i16, src: int16x8_t) - not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_lane_s32(ptr: *const i32, src: int32x2_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 1); simd_insert(src, LANE as u32, *ptr) @@ -1054,6 +1388,10 @@ pub unsafe fn vld1_lane_s32(ptr: *const i32, src: int32x2_t) -> not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_lane_s32(ptr: *const i32, src: int32x4_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); simd_insert(src, LANE as u32, *ptr) @@ -1070,6 +1408,10 @@ pub unsafe fn vld1q_lane_s32(ptr: *const i32, src: int32x4_t) - not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_lane_s64(ptr: *const i64, src: int64x1_t) -> int64x1_t { static_assert!(LANE == 0); simd_insert(src, LANE as u32, *ptr) @@ -1086,6 +1428,10 @@ pub unsafe fn vld1_lane_s64(ptr: *const i64, src: int64x1_t) -> not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_lane_s64(ptr: *const i64, src: int64x2_t) -> int64x2_t { static_assert_uimm_bits!(LANE, 1); simd_insert(src, LANE as u32, *ptr) @@ -1102,6 +1448,10 @@ pub unsafe fn vld1q_lane_s64(ptr: *const i64, src: int64x2_t) - not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") 
+)] pub unsafe fn vld1_lane_u8(ptr: *const u8, src: uint8x8_t) -> uint8x8_t { static_assert_uimm_bits!(LANE, 3); simd_insert(src, LANE as u32, *ptr) @@ -1118,6 +1468,10 @@ pub unsafe fn vld1_lane_u8(ptr: *const u8, src: uint8x8_t) -> u not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_lane_u8(ptr: *const u8, src: uint8x16_t) -> uint8x16_t { static_assert_uimm_bits!(LANE, 4); simd_insert(src, LANE as u32, *ptr) @@ -1134,6 +1488,10 @@ pub unsafe fn vld1q_lane_u8(ptr: *const u8, src: uint8x16_t) -> not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_lane_u16(ptr: *const u16, src: uint16x4_t) -> uint16x4_t { static_assert_uimm_bits!(LANE, 2); simd_insert(src, LANE as u32, *ptr) @@ -1150,6 +1508,10 @@ pub unsafe fn vld1_lane_u16(ptr: *const u16, src: uint16x4_t) - not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_lane_u16(ptr: *const u16, src: uint16x8_t) -> uint16x8_t { static_assert_uimm_bits!(LANE, 3); simd_insert(src, LANE as u32, *ptr) @@ -1166,6 +1528,10 @@ pub unsafe fn vld1q_lane_u16(ptr: *const u16, src: uint16x8_t) not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_lane_u32(ptr: *const u32, src: uint32x2_t) -> uint32x2_t { static_assert_uimm_bits!(LANE, 1); simd_insert(src, LANE as u32, *ptr) @@ -1182,6 +1548,10 @@ pub unsafe fn vld1_lane_u32(ptr: *const u32, src: uint32x2_t) - not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_lane_u32(ptr: *const u32, src: uint32x4_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 2); simd_insert(src, LANE as u32, *ptr) @@ -1198,6 +1568,10 @@ pub unsafe fn vld1q_lane_u32(ptr: *const u32, src: uint32x4_t) not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_lane_u64(ptr: *const u64, src: uint64x1_t) -> uint64x1_t { static_assert!(LANE == 0); simd_insert(src, LANE as u32, *ptr) @@ -1214,6 +1588,10 @@ pub unsafe fn vld1_lane_u64(ptr: *const u64, src: uint64x1_t) - not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_lane_u64(ptr: *const u64, src: uint64x2_t) -> uint64x2_t { static_assert_uimm_bits!(LANE, 1); simd_insert(src, LANE as u32, *ptr) @@ -1230,6 +1608,10 @@ pub unsafe fn vld1q_lane_u64(ptr: *const u64, src: uint64x2_t) not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_lane_p8(ptr: *const p8, src: poly8x8_t) -> poly8x8_t { static_assert_uimm_bits!(LANE, 3); simd_insert(src, LANE as u32, *ptr) @@ -1246,6 +1628,10 @@ pub unsafe fn vld1_lane_p8(ptr: *const p8, src: poly8x8_t) -> p not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_lane_p8(ptr: *const p8, src: poly8x16_t) -> poly8x16_t { static_assert_uimm_bits!(LANE, 4); simd_insert(src, LANE as u32, 
*ptr) @@ -1262,6 +1648,10 @@ pub unsafe fn vld1q_lane_p8(ptr: *const p8, src: poly8x16_t) -> not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_lane_p16(ptr: *const p16, src: poly16x4_t) -> poly16x4_t { static_assert_uimm_bits!(LANE, 2); simd_insert(src, LANE as u32, *ptr) @@ -1278,6 +1668,10 @@ pub unsafe fn vld1_lane_p16(ptr: *const p16, src: poly16x4_t) - not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_lane_p16(ptr: *const p16, src: poly16x8_t) -> poly16x8_t { static_assert_uimm_bits!(LANE, 3); simd_insert(src, LANE as u32, *ptr) @@ -1296,6 +1690,10 @@ pub unsafe fn vld1q_lane_p16(ptr: *const p16, src: poly16x8_t) not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_lane_p64(ptr: *const p64, src: poly64x1_t) -> poly64x1_t { static_assert!(LANE == 0); simd_insert(src, LANE as u32, *ptr) @@ -1314,6 +1712,10 @@ pub unsafe fn vld1_lane_p64(ptr: *const p64, src: poly64x1_t) - not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_lane_p64(ptr: *const p64, src: poly64x2_t) -> poly64x2_t { static_assert_uimm_bits!(LANE, 1); simd_insert(src, LANE as u32, *ptr) @@ -1330,6 +1732,10 @@ pub unsafe fn vld1q_lane_p64(ptr: *const p64, src: poly64x2_t) not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub 
unsafe fn vld1_lane_f32(ptr: *const f32, src: float32x2_t) -> float32x2_t { static_assert_uimm_bits!(LANE, 1); simd_insert(src, LANE as u32, *ptr) @@ -1346,6 +1752,10 @@ pub unsafe fn vld1_lane_f32(ptr: *const f32, src: float32x2_t) not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_lane_f32(ptr: *const f32, src: float32x4_t) -> float32x4_t { static_assert_uimm_bits!(LANE, 2); simd_insert(src, LANE as u32, *ptr) @@ -1361,6 +1771,10 @@ pub unsafe fn vld1q_lane_f32(ptr: *const f32, src: float32x4_t) not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t { let x = vld1_lane_s8::<0>(ptr, transmute(i8x8::splat(0))); simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -1376,6 +1790,10 @@ pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t { let x = vld1q_lane_s8::<0>(ptr, transmute(i8x16::splat(0))); simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) @@ -1391,6 +1809,10 @@ pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t { let x = vld1_lane_s16::<0>(ptr, transmute(i16x4::splat(0))); simd_shuffle!(x, x, [0, 0, 0, 0]) @@ -1406,6 +1828,10 @@ pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t { 
not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t { let x = vld1q_lane_s16::<0>(ptr, transmute(i16x8::splat(0))); simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -1421,6 +1847,10 @@ pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t { let x = vld1_lane_s32::<0>(ptr, transmute(i32x2::splat(0))); simd_shuffle!(x, x, [0, 0]) @@ -1436,6 +1866,10 @@ pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t { let x = vld1q_lane_s32::<0>(ptr, transmute(i32x4::splat(0))); simd_shuffle!(x, x, [0, 0, 0, 0]) @@ -1451,6 +1885,10 @@ pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t { #[cfg(target_arch = "aarch64")] { @@ -1472,6 +1910,10 @@ pub unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t { let x = vld1q_lane_s64::<0>(ptr, transmute(i64x2::splat(0))); simd_shuffle!(x, 
x, [0, 0]) @@ -1487,6 +1929,10 @@ pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t { let x = vld1_lane_u8::<0>(ptr, transmute(u8x8::splat(0))); simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -1502,6 +1948,10 @@ pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t { let x = vld1q_lane_u8::<0>(ptr, transmute(u8x16::splat(0))); simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) @@ -1517,6 +1967,10 @@ pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t { let x = vld1_lane_u16::<0>(ptr, transmute(u16x4::splat(0))); simd_shuffle!(x, x, [0, 0, 0, 0]) @@ -1532,6 +1986,10 @@ pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t { let x = vld1q_lane_u16::<0>(ptr, transmute(u16x8::splat(0))); simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -1547,6 +2005,10 @@ pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t { let x = vld1_lane_u32::<0>(ptr, transmute(u32x2::splat(0))); simd_shuffle!(x, x, [0, 0]) @@ -1562,6 +2024,10 @@ pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t { let x = vld1q_lane_u32::<0>(ptr, transmute(u32x4::splat(0))); simd_shuffle!(x, x, [0, 0, 0, 0]) @@ -1577,6 +2043,10 @@ pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t { #[cfg(target_arch = "aarch64")] { @@ -1598,6 +2068,10 @@ pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t { let x = vld1q_lane_u64::<0>(ptr, transmute(u64x2::splat(0))); simd_shuffle!(x, x, [0, 0]) @@ -1613,6 +2087,10 @@ pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t { let x = vld1_lane_p8::<0>(ptr, transmute(u8x8::splat(0))); simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -1628,6 +2106,10 @@ pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t { not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t { let x = vld1q_lane_p8::<0>(ptr, transmute(u8x16::splat(0))); simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) @@ -1643,6 +2125,10 @@ pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t { let x = vld1_lane_p16::<0>(ptr, transmute(u16x4::splat(0))); simd_shuffle!(x, x, [0, 0, 0, 0]) @@ -1658,6 +2144,10 @@ pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t { let x = vld1q_lane_p16::<0>(ptr, transmute(u16x8::splat(0))); simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -1673,6 +2163,10 @@ pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t { let x = vld1_lane_f32::<0>(ptr, transmute(f32x2::splat(0.))); simd_shuffle!(x, x, [0, 0]) @@ -1690,6 +2184,10 @@ pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t { 
#[cfg(target_arch = "aarch64")] { @@ -1713,6 +2211,10 @@ pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t { let x = vld1q_lane_p64::<0>(ptr, transmute(u64x2::splat(0))); simd_shuffle!(x, x, [0, 0]) @@ -1728,6 +2230,10 @@ pub unsafe fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t { let x = vld1q_lane_f32::<0>(ptr, transmute(f32x4::splat(0.))); simd_shuffle!(x, x, [0, 0, 0, 0]) @@ -1743,6 +2249,10 @@ pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaba_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { simd_add(a, vabd_s8(b, c)) } @@ -1755,6 +2265,10 @@ pub unsafe fn vaba_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaba_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { simd_add(a, vabd_s16(b, c)) } @@ -1767,6 +2281,10 @@ pub unsafe fn vaba_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn 
vaba_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { simd_add(a, vabd_s32(b, c)) } @@ -1779,6 +2297,10 @@ pub unsafe fn vaba_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaba_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { simd_add(a, vabd_u8(b, c)) } @@ -1791,6 +2313,10 @@ pub unsafe fn vaba_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaba_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { simd_add(a, vabd_u16(b, c)) } @@ -1803,6 +2329,10 @@ pub unsafe fn vaba_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaba_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { simd_add(a, vabd_u32(b, c)) } @@ -1816,6 +2346,10 @@ pub unsafe fn vaba_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vabaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { simd_add(a, vabdq_s8(b, c)) } @@ -1828,6 +2362,10 @@ pub unsafe fn vabaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vabaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { simd_add(a, vabdq_s16(b, c)) } @@ -1840,6 +2378,10 @@ pub unsafe fn vabaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vabaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { simd_add(a, vabdq_s32(b, c)) } @@ -1852,6 +2394,10 @@ pub unsafe fn vabaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vabaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { simd_add(a, vabdq_u8(b, c)) } @@ -1864,6 +2410,10 @@ pub unsafe fn vabaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vabaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { simd_add(a, vabdq_u16(b, c)) } @@ -1876,6 +2426,10 @@ pub unsafe fn vabaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vabaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { simd_add(a, vabdq_u32(b, c)) } @@ -1890,6 +2444,10 @@ pub unsafe fn vabaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] 
+#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { vabs_s8_(a) } @@ -1903,6 +2461,10 @@ pub unsafe fn vabs_s8(a: int8x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { vabs_s16_(a) } @@ -1916,6 +2478,10 @@ pub unsafe fn vabs_s16(a: int16x4_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { vabs_s32_(a) } @@ -1929,6 +2495,10 @@ pub unsafe fn vabs_s32(a: int32x2_t) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { vabsq_s8_(a) } @@ -1942,6 +2512,10 @@ pub unsafe fn vabsq_s8(a: int8x16_t) -> int8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { vabsq_s16_(a) } @@ -1955,6 +2529,10 @@ pub unsafe fn vabsq_s16(a: int16x8_t) -> int16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { vabsq_s32_(a) } @@ -1969,6 +2547,10 @@ pub unsafe fn vabsq_s32(a: int32x4_t) -> int32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { vpadd_s16_(a, b) } @@ -1982,6 +2564,10 @@ pub unsafe fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { vpadd_s32_(a, b) } @@ -1995,6 +2581,10 @@ pub unsafe fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { vpadd_s8_(a, b) } @@ -2008,6 +2598,10 @@ pub unsafe fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { transmute(vpadd_s16_(transmute(a), transmute(b))) } @@ -2021,6 +2615,10 @@ pub unsafe fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { transmute(vpadd_s32_(transmute(a), transmute(b))) } @@ -2034,6 +2632,10 @@ pub unsafe fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { transmute(vpadd_s8_(transmute(a), transmute(b))) } @@ -2048,6 +2650,10 @@ pub unsafe fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_add(a, b) } @@ -2062,6 +2668,10 @@ pub unsafe fn vadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_add(a, b) } @@ -2076,6 +2686,10 @@ pub unsafe fn vaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_add(a, b) } @@ -2090,6 +2704,10 @@ pub unsafe fn vadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_add(a, b) } @@ -2104,6 +2722,10 @@ pub unsafe fn vaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_add(a, b) } @@ -2118,6 
+2740,10 @@ pub unsafe fn vadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_add(a, b) } @@ -2132,6 +2758,10 @@ pub unsafe fn vaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_add(a, b) } @@ -2146,6 +2776,10 @@ pub unsafe fn vaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_add(a, b) } @@ -2160,6 +2794,10 @@ pub unsafe fn vadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_add(a, b) } @@ -2174,6 +2812,10 @@ pub unsafe fn vaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_add(a, b) } @@ -2188,6 +2830,10 @@ pub unsafe fn vadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] 
+#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_add(a, b) } @@ -2202,6 +2848,10 @@ pub unsafe fn vaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_add(a, b) } @@ -2216,6 +2866,10 @@ pub unsafe fn vadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_add(a, b) } @@ -2230,6 +2884,10 @@ pub unsafe fn vaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_add(a, b) } @@ -2244,6 +2902,10 @@ pub unsafe fn vaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_add(a, b) } @@ -2258,6 +2920,10 @@ pub unsafe fn vadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn 
vaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_add(a, b) } @@ -2272,6 +2938,10 @@ pub unsafe fn vaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { let a: int16x8_t = simd_cast(a); let b: int16x8_t = simd_cast(b); @@ -2288,6 +2958,10 @@ pub unsafe fn vaddl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { let a: int32x4_t = simd_cast(a); let b: int32x4_t = simd_cast(b); @@ -2304,6 +2978,10 @@ pub unsafe fn vaddl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { let a: int64x2_t = simd_cast(a); let b: int64x2_t = simd_cast(b); @@ -2320,6 +2998,10 @@ pub unsafe fn vaddl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { let a: uint16x8_t = simd_cast(a); let b: uint16x8_t = simd_cast(b); @@ -2336,6 +3018,10 @@ pub unsafe fn vaddl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { let a: uint32x4_t = simd_cast(a); let b: uint32x4_t = simd_cast(b); @@ -2352,6 +3038,10 @@ pub unsafe fn vaddl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { let a: uint64x2_t = simd_cast(a); let b: uint64x2_t = simd_cast(b); @@ -2368,6 +3058,10 @@ pub unsafe fn vaddl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -2386,6 +3080,10 @@ pub unsafe fn vaddl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); @@ -2404,6 +3102,10 @@ pub unsafe fn vaddl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { let a: int32x2_t = simd_shuffle!(a, a, [2, 3]); let b: 
int32x2_t = simd_shuffle!(b, b, [2, 3]); @@ -2422,6 +3124,10 @@ pub unsafe fn vaddl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); @@ -2440,6 +3146,10 @@ pub unsafe fn vaddl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]); let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); @@ -2458,6 +3168,10 @@ pub unsafe fn vaddl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]); let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); @@ -2476,6 +3190,10 @@ pub unsafe fn vaddl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { let b: int16x8_t = simd_cast(b); simd_add(a, b) @@ -2491,6 +3209,10 @@ pub unsafe fn vaddw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { let b: int32x4_t = simd_cast(b); simd_add(a, b) @@ -2506,6 +3228,10 @@ pub unsafe fn vaddw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { let b: int64x2_t = simd_cast(b); simd_add(a, b) @@ -2521,6 +3247,10 @@ pub unsafe fn vaddw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { let b: uint16x8_t = simd_cast(b); simd_add(a, b) @@ -2536,6 +3266,10 @@ pub unsafe fn vaddw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { let b: uint32x4_t = simd_cast(b); simd_add(a, b) @@ -2551,6 +3285,10 @@ pub unsafe fn vaddw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { let b: uint64x2_t = simd_cast(b); simd_add(a, b) @@ -2566,6 +3304,10 @@ pub unsafe fn vaddw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { 
not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); let b: int16x8_t = simd_cast(b); @@ -2582,6 +3324,10 @@ pub unsafe fn vaddw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); let b: int32x4_t = simd_cast(b); @@ -2598,6 +3344,10 @@ pub unsafe fn vaddw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { let b: int32x2_t = simd_shuffle!(b, b, [2, 3]); let b: int64x2_t = simd_cast(b); @@ -2614,6 +3364,10 @@ pub unsafe fn vaddw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]); let b: uint16x8_t = simd_cast(b); @@ -2630,6 +3384,10 @@ pub unsafe fn vaddw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800") +)] pub unsafe fn vaddw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]); let b: uint32x4_t = simd_cast(b); @@ -2646,6 +3404,10 @@ pub unsafe fn vaddw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]); let b: uint64x2_t = simd_cast(b); @@ -2662,6 +3424,10 @@ pub unsafe fn vaddw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { simd_cast(simd_shr(simd_add(a, b), int16x8_t(8, 8, 8, 8, 8, 8, 8, 8))) } @@ -2676,6 +3442,10 @@ pub unsafe fn vaddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { simd_cast(simd_shr(simd_add(a, b), int32x4_t(16, 16, 16, 16))) } @@ -2690,6 +3460,10 @@ pub unsafe fn vaddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { simd_cast(simd_shr(simd_add(a, b), int64x2_t(32, 32))) } @@ -2704,6 +3478,10 @@ pub unsafe fn vaddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { simd_cast(simd_shr(simd_add(a, b), uint16x8_t(8, 8, 8, 8, 8, 8, 8, 8))) } @@ -2718,6 +3496,10 @@ pub unsafe fn vaddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { simd_cast(simd_shr(simd_add(a, b), uint32x4_t(16, 16, 16, 16))) } @@ -2732,6 +3514,10 @@ pub unsafe fn vaddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { simd_cast(simd_shr(simd_add(a, b), uint64x2_t(32, 32))) } @@ -2746,6 +3532,10 @@ pub unsafe fn vaddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t { let x = simd_cast(simd_shr(simd_add(a, b), int16x8_t(8, 8, 8, 8, 8, 8, 8, 8))); simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -2761,6 +3551,10 @@ pub unsafe fn vaddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x1 not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn 
vaddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t { let x = simd_cast(simd_shr(simd_add(a, b), int32x4_t(16, 16, 16, 16))); simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -2776,6 +3570,10 @@ pub unsafe fn vaddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16 not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t { let x = simd_cast(simd_shr(simd_add(a, b), int64x2_t(32, 32))); simd_shuffle!(r, x, [0, 1, 2, 3]) @@ -2791,6 +3589,10 @@ pub unsafe fn vaddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32 not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t { let x = simd_cast(simd_shr(simd_add(a, b), uint16x8_t(8, 8, 8, 8, 8, 8, 8, 8))); simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -2806,6 +3608,10 @@ pub unsafe fn vaddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uin not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vaddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t { let x = simd_cast(simd_shr(simd_add(a, b), uint32x4_t(16, 16, 16, 16))); simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -2821,6 +3627,10 @@ pub unsafe fn vaddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> ui not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", 
issue = "111800") +)] pub unsafe fn vaddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t { let x = simd_cast(simd_shr(simd_add(a, b), uint64x2_t(32, 32))); simd_shuffle!(r, x, [0, 1, 2, 3]) @@ -2836,6 +3646,10 @@ pub unsafe fn vaddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> ui not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { vraddhn_s16_(a, b) } @@ -2850,6 +3664,10 @@ pub unsafe fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { vraddhn_s32_(a, b) } @@ -2864,6 +3682,10 @@ pub unsafe fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { vraddhn_s64_(a, b) } @@ -2878,6 +3700,10 @@ pub unsafe fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { transmute(vraddhn_s16_(transmute(a), transmute(b))) } @@ -2892,6 +3718,10 @@ pub unsafe fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { transmute(vraddhn_s32_(transmute(a), transmute(b))) } @@ -2906,6 +3736,10 @@ pub unsafe fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { transmute(vraddhn_s64_(transmute(a), transmute(b))) } @@ -2920,6 +3754,10 @@ pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vraddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t { let x = vraddhn_s16_(a, b); simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -2935,6 +3773,10 @@ pub unsafe fn vraddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vraddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t { let x = vraddhn_s32_(a, b); simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -2950,6 +3792,10 @@ pub unsafe fn vraddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int1 not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vraddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t { let x = vraddhn_s64_(a, b); simd_shuffle!(r, x, [0, 1, 2, 3]) @@ -2965,6 +3811,10 
@@ pub unsafe fn vraddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int3 not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vraddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t { let x: uint8x8_t = transmute(vraddhn_s16_(transmute(a), transmute(b))); simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -2980,6 +3830,10 @@ pub unsafe fn vraddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> ui not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vraddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t { let x: uint16x4_t = transmute(vraddhn_s32_(transmute(a), transmute(b))); simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7]) @@ -2995,6 +3849,10 @@ pub unsafe fn vraddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> u not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vraddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t { let x: uint32x2_t = transmute(vraddhn_s64_(transmute(a), transmute(b))); simd_shuffle!(r, x, [0, 1, 2, 3]) @@ -3010,6 +3868,10 @@ pub unsafe fn vraddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> u not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { vpaddl_s8_(a) } @@ -3024,6 +3886,10 @@ pub unsafe fn vpaddl_s8(a: int8x8_t) -> int16x4_t { not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { vpaddl_s16_(a) } @@ -3038,6 +3904,10 @@ pub unsafe fn vpaddl_s16(a: int16x4_t) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { vpaddl_s32_(a) } @@ -3052,6 +3922,10 @@ pub unsafe fn vpaddl_s32(a: int32x2_t) -> int64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { vpaddlq_s8_(a) } @@ -3066,6 +3940,10 @@ pub unsafe fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { vpaddlq_s16_(a) } @@ -3080,6 +3958,10 @@ pub unsafe fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { vpaddlq_s32_(a) } @@ -3094,6 +3976,10 @@ pub unsafe fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { vpaddl_u8_(a) } @@ -3108,6 +3994,10 @@ pub unsafe fn vpaddl_u8(a: 
uint8x8_t) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { vpaddl_u16_(a) } @@ -3122,6 +4012,10 @@ pub unsafe fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { vpaddl_u32_(a) } @@ -3136,6 +4030,10 @@ pub unsafe fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { vpaddlq_u8_(a) } @@ -3150,6 +4048,10 @@ pub unsafe fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { vpaddlq_u16_(a) } @@ -3164,6 +4066,10 @@ pub unsafe fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { vpaddlq_u32_(a) } @@ -3178,6 +4084,10 @@ pub unsafe fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovn_s16(a: int16x8_t) -> 
int8x8_t { simd_cast(a) } @@ -3192,6 +4102,10 @@ pub unsafe fn vmovn_s16(a: int16x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovn_s32(a: int32x4_t) -> int16x4_t { simd_cast(a) } @@ -3206,6 +4120,10 @@ pub unsafe fn vmovn_s32(a: int32x4_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovn_s64(a: int64x2_t) -> int32x2_t { simd_cast(a) } @@ -3220,6 +4138,10 @@ pub unsafe fn vmovn_s64(a: int64x2_t) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovn_u16(a: uint16x8_t) -> uint8x8_t { simd_cast(a) } @@ -3234,6 +4156,10 @@ pub unsafe fn vmovn_u16(a: uint16x8_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovn_u32(a: uint32x4_t) -> uint16x4_t { simd_cast(a) } @@ -3248,6 +4174,10 @@ pub unsafe fn vmovn_u32(a: uint32x4_t) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovn_u64(a: uint64x2_t) -> uint32x2_t { simd_cast(a) } @@ -3262,6 +4192,10 @@ pub unsafe fn vmovn_u64(a: uint64x2_t) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe 
fn vmovl_s8(a: int8x8_t) -> int16x8_t { simd_cast(a) } @@ -3276,6 +4210,10 @@ pub unsafe fn vmovl_s8(a: int8x8_t) -> int16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovl_s16(a: int16x4_t) -> int32x4_t { simd_cast(a) } @@ -3290,6 +4228,10 @@ pub unsafe fn vmovl_s16(a: int16x4_t) -> int32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovl_s32(a: int32x2_t) -> int64x2_t { simd_cast(a) } @@ -3304,6 +4246,10 @@ pub unsafe fn vmovl_s32(a: int32x2_t) -> int64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovl_u8(a: uint8x8_t) -> uint16x8_t { simd_cast(a) } @@ -3318,6 +4264,10 @@ pub unsafe fn vmovl_u8(a: uint8x8_t) -> uint16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovl_u16(a: uint16x4_t) -> uint32x4_t { simd_cast(a) } @@ -3332,6 +4282,10 @@ pub unsafe fn vmovl_u16(a: uint16x4_t) -> uint32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovl_u32(a: uint32x2_t) -> uint64x2_t { simd_cast(a) } @@ -3346,6 +4300,10 @@ pub unsafe fn vmovl_u32(a: uint32x2_t) -> uint64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue 
= "111800") +)] pub unsafe fn vmvn_s8(a: int8x8_t) -> int8x8_t { let b = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_xor(a, b) @@ -3361,6 +4319,10 @@ pub unsafe fn vmvn_s8(a: int8x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmvnq_s8(a: int8x16_t) -> int8x16_t { let b = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -3378,6 +4340,10 @@ pub unsafe fn vmvnq_s8(a: int8x16_t) -> int8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmvn_s16(a: int16x4_t) -> int16x4_t { let b = int16x4_t(-1, -1, -1, -1); simd_xor(a, b) @@ -3393,6 +4359,10 @@ pub unsafe fn vmvn_s16(a: int16x4_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmvnq_s16(a: int16x8_t) -> int16x8_t { let b = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_xor(a, b) @@ -3408,6 +4378,10 @@ pub unsafe fn vmvnq_s16(a: int16x8_t) -> int16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmvn_s32(a: int32x2_t) -> int32x2_t { let b = int32x2_t(-1, -1); simd_xor(a, b) @@ -3423,6 +4397,10 @@ pub unsafe fn vmvn_s32(a: int32x2_t) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmvnq_s32(a: int32x4_t) -> int32x4_t { let b = int32x4_t(-1, -1, 
-1, -1); simd_xor(a, b) @@ -3438,6 +4416,10 @@ pub unsafe fn vmvnq_s32(a: int32x4_t) -> int32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmvn_u8(a: uint8x8_t) -> uint8x8_t { let b = uint8x8_t(255, 255, 255, 255, 255, 255, 255, 255); simd_xor(a, b) @@ -3453,6 +4435,10 @@ pub unsafe fn vmvn_u8(a: uint8x8_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t { let b = uint8x16_t( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, @@ -3470,6 +4456,10 @@ pub unsafe fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmvn_u16(a: uint16x4_t) -> uint16x4_t { let b = uint16x4_t(65_535, 65_535, 65_535, 65_535); simd_xor(a, b) @@ -3485,6 +4475,10 @@ pub unsafe fn vmvn_u16(a: uint16x4_t) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t { let b = uint16x8_t( 65_535, 65_535, 65_535, 65_535, 65_535, 65_535, 65_535, 65_535, @@ -3502,6 +4496,10 @@ pub unsafe fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmvn_u32(a: uint32x2_t) -> uint32x2_t { let b = uint32x2_t(4_294_967_295, 
4_294_967_295); simd_xor(a, b) @@ -3517,6 +4515,10 @@ pub unsafe fn vmvn_u32(a: uint32x2_t) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t { let b = uint32x4_t(4_294_967_295, 4_294_967_295, 4_294_967_295, 4_294_967_295); simd_xor(a, b) @@ -3532,6 +4534,10 @@ pub unsafe fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmvn_p8(a: poly8x8_t) -> poly8x8_t { let b = poly8x8_t(255, 255, 255, 255, 255, 255, 255, 255); simd_xor(a, b) @@ -3547,6 +4553,10 @@ pub unsafe fn vmvn_p8(a: poly8x8_t) -> poly8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t { let b = poly8x16_t( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, @@ -3564,6 +4574,10 @@ pub unsafe fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbic_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let c = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_and(simd_xor(b, c), a) @@ -3579,6 +4593,10 @@ pub unsafe fn vbic_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbicq_s8(a: int8x16_t, b: 
int8x16_t) -> int8x16_t { let c = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -3596,6 +4614,10 @@ pub unsafe fn vbicq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbic_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let c = int16x4_t(-1, -1, -1, -1); simd_and(simd_xor(b, c), a) @@ -3611,6 +4633,10 @@ pub unsafe fn vbic_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbicq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let c = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_and(simd_xor(b, c), a) @@ -3626,6 +4652,10 @@ pub unsafe fn vbicq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbic_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let c = int32x2_t(-1, -1); simd_and(simd_xor(b, c), a) @@ -3641,6 +4671,10 @@ pub unsafe fn vbic_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbicq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let c = int32x4_t(-1, -1, -1, -1); simd_and(simd_xor(b, c), a) @@ -3656,6 +4690,10 @@ pub unsafe fn vbicq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbic_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { let c = int64x1_t(-1); simd_and(simd_xor(b, c), a) @@ -3671,6 +4709,10 @@ pub unsafe fn vbic_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbicq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let c = int64x2_t(-1, -1); simd_and(simd_xor(b, c), a) @@ -3686,6 +4728,10 @@ pub unsafe fn vbicq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbic_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let c = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -3701,6 +4747,10 @@ pub unsafe fn vbic_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { let c = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -3718,6 +4768,10 @@ pub unsafe fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbic_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let c = int16x4_t(-1, -1, -1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -3733,6 +4787,10 @@ pub unsafe fn vbic_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbicq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let c = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -3748,6 +4806,10 @@ pub unsafe fn vbicq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbic_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let c = int32x2_t(-1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -3763,6 +4825,10 @@ pub unsafe fn vbic_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbicq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let c = int32x4_t(-1, -1, -1, -1); simd_and(simd_xor(b, transmute(c)), a) @@ -3778,6 +4844,10 @@ pub unsafe fn vbicq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbic_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { let c = int64x1_t(-1); simd_and(simd_xor(b, transmute(c)), a) @@ -3793,6 +4863,10 @@ pub unsafe fn vbic_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let c = int64x2_t(-1, -1); simd_and(simd_xor(b, 
transmute(c)), a) @@ -3812,6 +4886,10 @@ pub unsafe fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { let not = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); transmute(simd_or( @@ -3830,6 +4908,10 @@ pub unsafe fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { let not = int16x4_t(-1, -1, -1, -1); transmute(simd_or( @@ -3848,6 +4930,10 @@ pub unsafe fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { let not = int32x2_t(-1, -1); transmute(simd_or( @@ -3866,6 +4952,10 @@ pub unsafe fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { let not = int64x1_t(-1); transmute(simd_or( @@ -3884,6 +4974,10 @@ pub unsafe fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { let not = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); transmute(simd_or( @@ -3902,6 +4996,10 @@ pub unsafe fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { let not = int16x4_t(-1, -1, -1, -1); transmute(simd_or( @@ -3920,6 +5018,10 @@ pub unsafe fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { let not = int32x2_t(-1, -1); transmute(simd_or( @@ -3938,6 +5040,10 @@ pub unsafe fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_t { let not = int64x1_t(-1); transmute(simd_or( @@ -3956,6 +5062,10 @@ pub unsafe fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { let not = int32x2_t(-1, -1); transmute(simd_or( @@ -3974,6 +5084,10 @@ pub unsafe fn vbsl_f32(a: uint32x2_t, b: 
float32x2_t, c: float32x2_t) -> float32 not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t { let not = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); transmute(simd_or( @@ -3992,6 +5106,10 @@ pub unsafe fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_t { let not = int16x4_t(-1, -1, -1, -1); transmute(simd_or( @@ -4010,6 +5128,10 @@ pub unsafe fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { let not = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -4030,6 +5152,10 @@ pub unsafe fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { let not = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); transmute(simd_or( @@ -4048,6 +5174,10 @@ pub unsafe fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { let not = int32x4_t(-1, -1, -1, -1); transmute(simd_or( @@ -4066,6 +5196,10 @@ pub unsafe fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { let not = int64x2_t(-1, -1); transmute(simd_or( @@ -4084,6 +5218,10 @@ pub unsafe fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { let not = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -4104,6 +5242,10 @@ pub unsafe fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { let not = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); transmute(simd_or( @@ -4122,6 +5264,10 @@ pub unsafe fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { let not = int32x4_t(-1, -1, -1, -1); transmute(simd_or( @@ -4140,6 +5286,10 @@ 
pub unsafe fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { let not = int64x2_t(-1, -1); transmute(simd_or( @@ -4158,6 +5308,10 @@ pub unsafe fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2 not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_t { let not = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -4178,6 +5332,10 @@ pub unsafe fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8_t { let not = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); transmute(simd_or( @@ -4196,6 +5354,10 @@ pub unsafe fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8 not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vbslq_f32(a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { let not = int32x4_t(-1, -1, -1, -1); transmute(simd_or( @@ -4214,6 +5376,10 @@ pub unsafe fn vbslq_f32(a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float3 not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { let c = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_or(simd_xor(b, c), a) @@ -4229,6 +5395,10 @@ pub unsafe fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { let c = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -4246,6 +5416,10 @@ pub unsafe fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { let c = int16x4_t(-1, -1, -1, -1); simd_or(simd_xor(b, c), a) @@ -4261,6 +5435,10 @@ pub unsafe fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { let c = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_or(simd_xor(b, c), a) @@ -4276,6 +5454,10 @@ pub unsafe fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { let c = int32x2_t(-1, -1); simd_or(simd_xor(b, c), a) @@ -4291,6 +5473,10 @@ pub unsafe fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { let c = int32x4_t(-1, -1, -1, -1); simd_or(simd_xor(b, c), a) @@ -4306,6 +5492,10 @@ pub unsafe fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { let c = int64x1_t(-1); simd_or(simd_xor(b, c), a) @@ -4321,6 +5511,10 @@ pub unsafe fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { let c = int64x2_t(-1, -1); simd_or(simd_xor(b, c), a) @@ -4336,6 +5530,10 @@ pub unsafe fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let c = int8x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -4351,6 +5549,10 @@ pub unsafe fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { let c = int8x16_t( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -4368,6 +5570,10 @@ pub 
unsafe fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let c = int16x4_t(-1, -1, -1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -4383,6 +5589,10 @@ pub unsafe fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let c = int16x8_t(-1, -1, -1, -1, -1, -1, -1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -4398,6 +5608,10 @@ pub unsafe fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let c = int32x2_t(-1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -4413,6 +5627,10 @@ pub unsafe fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let c = int32x4_t(-1, -1, -1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -4428,6 +5646,10 @@ pub unsafe fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn 
vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { let c = int64x1_t(-1); simd_or(simd_xor(b, transmute(c)), a) @@ -4443,6 +5665,10 @@ pub unsafe fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { let c = int64x2_t(-1, -1); simd_or(simd_xor(b, transmute(c)), a) @@ -4458,6 +5684,10 @@ pub unsafe fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { vpmins_v8i8(a, b) } @@ -4472,6 +5702,10 @@ pub unsafe fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { vpmins_v4i16(a, b) } @@ -4486,6 +5720,10 @@ pub unsafe fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { vpmins_v2i32(a, b) } @@ -4500,6 +5738,10 @@ pub unsafe fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { 
vpminu_v8i8(a, b) } @@ -4514,6 +5756,10 @@ pub unsafe fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { vpminu_v4i16(a, b) } @@ -4528,6 +5774,10 @@ pub unsafe fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { vpminu_v2i32(a, b) } @@ -4542,6 +5792,10 @@ pub unsafe fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { vpminf_v2f32(a, b) } @@ -4556,6 +5810,10 @@ pub unsafe fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { vpmaxs_v8i8(a, b) } @@ -4570,6 +5828,10 @@ pub unsafe fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { vpmaxs_v4i16(a, b) } @@ -4584,6 +5846,10 @@ pub unsafe fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { vpmaxs_v2i32(a, b) } @@ -4598,6 +5864,10 @@ pub unsafe fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { vpmaxu_v8i8(a, b) } @@ -4612,6 +5882,10 @@ pub unsafe fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { vpmaxu_v4i16(a, b) } @@ -4626,6 +5900,10 @@ pub unsafe fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { vpmaxu_v2i32(a, b) } @@ -4640,6 +5918,10 @@ pub unsafe fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { vpmaxf_v2f32(a, b) } @@ -4654,6 +5936,10 @@ pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vgetq_lane_u64(v: uint64x2_t) -> u64 { static_assert_uimm_bits!(IMM5, 1); simd_extract(v, IMM5 as u32) @@ -4669,6 +5955,10 @@ pub unsafe fn vgetq_lane_u64(v: uint64x2_t) -> u64 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_lane_u64(v: uint64x1_t) -> u64 { static_assert!(IMM5 == 0); simd_extract(v, 0) @@ -4684,6 +5974,10 @@ pub unsafe fn vget_lane_u64(v: uint64x1_t) -> u64 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_lane_u16(v: uint16x4_t) -> u16 { static_assert_uimm_bits!(IMM5, 2); simd_extract(v, IMM5 as u32) @@ -4699,6 +5993,10 @@ pub unsafe fn vget_lane_u16(v: uint16x4_t) -> u16 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_lane_s16(v: int16x4_t) -> i16 { static_assert_uimm_bits!(IMM5, 2); simd_extract(v, IMM5 as u32) @@ -4714,6 +6012,10 @@ pub unsafe fn vget_lane_s16(v: int16x4_t) -> i16 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_lane_p16(v: poly16x4_t) -> p16 { static_assert_uimm_bits!(IMM5, 2); simd_extract(v, IMM5 as u32) @@ -4729,6 +6031,10 @@ pub unsafe fn vget_lane_p16(v: poly16x4_t) -> p16 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_lane_u32(v: uint32x2_t) 
-> u32 { static_assert_uimm_bits!(IMM5, 1); simd_extract(v, IMM5 as u32) @@ -4744,6 +6050,10 @@ pub unsafe fn vget_lane_u32(v: uint32x2_t) -> u32 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_lane_s32(v: int32x2_t) -> i32 { static_assert_uimm_bits!(IMM5, 1); simd_extract(v, IMM5 as u32) @@ -4759,6 +6069,10 @@ pub unsafe fn vget_lane_s32(v: int32x2_t) -> i32 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_lane_f32(v: float32x2_t) -> f32 { static_assert_uimm_bits!(IMM5, 1); simd_extract(v, IMM5 as u32) @@ -4774,6 +6088,10 @@ pub unsafe fn vget_lane_f32(v: float32x2_t) -> f32 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vgetq_lane_f32(v: float32x4_t) -> f32 { static_assert_uimm_bits!(IMM5, 2); simd_extract(v, IMM5 as u32) @@ -4789,6 +6107,10 @@ pub unsafe fn vgetq_lane_f32(v: float32x4_t) -> f32 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_lane_p64(v: poly64x1_t) -> p64 { static_assert!(IMM5 == 0); simd_extract(v, IMM5 as u32) @@ -4804,6 +6126,10 @@ pub unsafe fn vget_lane_p64(v: poly64x1_t) -> p64 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vgetq_lane_p64(v: poly64x2_t) -> p64 { static_assert_uimm_bits!(IMM5, 1); simd_extract(v, IMM5 as u32) @@ 
-4819,6 +6145,10 @@ pub unsafe fn vgetq_lane_p64(v: poly64x2_t) -> p64 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_lane_s64(v: int64x1_t) -> i64 { static_assert!(IMM5 == 0); simd_extract(v, IMM5 as u32) @@ -4834,6 +6164,10 @@ pub unsafe fn vget_lane_s64(v: int64x1_t) -> i64 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vgetq_lane_s64(v: int64x2_t) -> i64 { static_assert_uimm_bits!(IMM5, 1); simd_extract(v, IMM5 as u32) @@ -4849,6 +6183,10 @@ pub unsafe fn vgetq_lane_s64(v: int64x2_t) -> i64 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vgetq_lane_u16(v: uint16x8_t) -> u16 { static_assert_uimm_bits!(IMM5, 3); simd_extract(v, IMM5 as u32) @@ -4864,6 +6202,10 @@ pub unsafe fn vgetq_lane_u16(v: uint16x8_t) -> u16 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vgetq_lane_u32(v: uint32x4_t) -> u32 { static_assert_uimm_bits!(IMM5, 2); simd_extract(v, IMM5 as u32) @@ -4879,6 +6221,10 @@ pub unsafe fn vgetq_lane_u32(v: uint32x4_t) -> u32 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vgetq_lane_s16(v: int16x8_t) -> i16 { static_assert_uimm_bits!(IMM5, 3); simd_extract(v, IMM5 as u32) @@ -4894,6 +6240,10 @@ pub unsafe fn vgetq_lane_s16(v: int16x8_t) -> i16 { 
not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vgetq_lane_p16(v: poly16x8_t) -> p16 { static_assert_uimm_bits!(IMM5, 3); simd_extract(v, IMM5 as u32) @@ -4909,6 +6259,10 @@ pub unsafe fn vgetq_lane_p16(v: poly16x8_t) -> p16 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vgetq_lane_s32(v: int32x4_t) -> i32 { static_assert_uimm_bits!(IMM5, 2); simd_extract(v, IMM5 as u32) @@ -4924,6 +6278,10 @@ pub unsafe fn vgetq_lane_s32(v: int32x4_t) -> i32 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_lane_u8(v: uint8x8_t) -> u8 { static_assert_uimm_bits!(IMM5, 3); simd_extract(v, IMM5 as u32) @@ -4939,6 +6297,10 @@ pub unsafe fn vget_lane_u8(v: uint8x8_t) -> u8 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_lane_s8(v: int8x8_t) -> i8 { static_assert_uimm_bits!(IMM5, 3); simd_extract(v, IMM5 as u32) @@ -4954,6 +6316,10 @@ pub unsafe fn vget_lane_s8(v: int8x8_t) -> i8 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_lane_p8(v: poly8x8_t) -> p8 { static_assert_uimm_bits!(IMM5, 3); simd_extract(v, IMM5 as u32) @@ -4969,6 +6335,10 @@ pub unsafe fn vget_lane_p8(v: poly8x8_t) -> p8 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + 
target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vgetq_lane_u8(v: uint8x16_t) -> u8 { static_assert_uimm_bits!(IMM5, 4); simd_extract(v, IMM5 as u32) @@ -4984,6 +6354,10 @@ pub unsafe fn vgetq_lane_u8(v: uint8x16_t) -> u8 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vgetq_lane_s8(v: int8x16_t) -> i8 { static_assert_uimm_bits!(IMM5, 4); simd_extract(v, IMM5 as u32) @@ -4999,6 +6373,10 @@ pub unsafe fn vgetq_lane_s8(v: int8x16_t) -> i8 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vgetq_lane_p8(v: poly8x16_t) -> p8 { static_assert_uimm_bits!(IMM5, 4); simd_extract(v, IMM5 as u32) @@ -5014,6 +6392,10 @@ pub unsafe fn vgetq_lane_p8(v: poly8x16_t) -> p8 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_high_s8(a: int8x16_t) -> int8x8_t { simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } @@ -5028,6 +6410,10 @@ pub unsafe fn vget_high_s8(a: int8x16_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_high_s16(a: int16x8_t) -> int16x4_t { simd_shuffle!(a, a, [4, 5, 6, 7]) } @@ -5042,6 +6428,10 @@ pub unsafe fn vget_high_s16(a: int16x8_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn 
vget_high_s32(a: int32x4_t) -> int32x2_t { simd_shuffle!(a, a, [2, 3]) } @@ -5056,6 +6446,10 @@ pub unsafe fn vget_high_s32(a: int32x4_t) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_high_s64(a: int64x2_t) -> int64x1_t { int64x1_t(simd_extract(a, 1)) } @@ -5070,6 +6464,10 @@ pub unsafe fn vget_high_s64(a: int64x2_t) -> int64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_high_u8(a: uint8x16_t) -> uint8x8_t { simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } @@ -5084,6 +6482,10 @@ pub unsafe fn vget_high_u8(a: uint8x16_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_high_u16(a: uint16x8_t) -> uint16x4_t { simd_shuffle!(a, a, [4, 5, 6, 7]) } @@ -5098,6 +6500,10 @@ pub unsafe fn vget_high_u16(a: uint16x8_t) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_high_u32(a: uint32x4_t) -> uint32x2_t { simd_shuffle!(a, a, [2, 3]) } @@ -5112,6 +6518,10 @@ pub unsafe fn vget_high_u32(a: uint32x4_t) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_high_u64(a: uint64x2_t) -> uint64x1_t { uint64x1_t(simd_extract(a, 1)) } @@ -5126,6 +6536,10 @@ pub unsafe fn vget_high_u64(a: uint64x2_t) -> uint64x1_t { 
not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_high_p8(a: poly8x16_t) -> poly8x8_t { simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]) } @@ -5140,6 +6554,10 @@ pub unsafe fn vget_high_p8(a: poly8x16_t) -> poly8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_high_p16(a: poly16x8_t) -> poly16x4_t { simd_shuffle!(a, a, [4, 5, 6, 7]) } @@ -5154,6 +6572,10 @@ pub unsafe fn vget_high_p16(a: poly16x8_t) -> poly16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_high_f32(a: float32x4_t) -> float32x2_t { simd_shuffle!(a, a, [2, 3]) } @@ -5167,6 +6589,10 @@ pub unsafe fn vget_high_f32(a: float32x4_t) -> float32x2_t { not(target_arch = "arm"), stable(feature = "vget_low_s8", since = "1.60.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_low_s8(a: int8x16_t) -> int8x8_t { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -5180,6 +6606,10 @@ pub unsafe fn vget_low_s8(a: int8x16_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_low_s16(a: int16x8_t) -> int16x4_t { simd_shuffle!(a, a, [0, 1, 2, 3]) } @@ -5193,6 +6623,10 @@ pub unsafe fn vget_low_s16(a: int16x8_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_low_s32(a: int32x4_t) -> int32x2_t { simd_shuffle!(a, a, [0, 1]) } @@ -5206,6 +6640,10 @@ pub unsafe fn vget_low_s32(a: int32x4_t) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_low_s64(a: int64x2_t) -> int64x1_t { int64x1_t(simd_extract(a, 0)) } @@ -5219,6 +6657,10 @@ pub unsafe fn vget_low_s64(a: int64x2_t) -> int64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_low_u8(a: uint8x16_t) -> uint8x8_t { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -5232,6 +6674,10 @@ pub unsafe fn vget_low_u8(a: uint8x16_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_low_u16(a: uint16x8_t) -> uint16x4_t { simd_shuffle!(a, a, [0, 1, 2, 3]) } @@ -5245,6 +6691,10 @@ pub unsafe fn vget_low_u16(a: uint16x8_t) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_low_u32(a: uint32x4_t) -> uint32x2_t { simd_shuffle!(a, a, [0, 1]) } @@ -5258,6 +6708,10 @@ pub unsafe fn vget_low_u32(a: uint32x4_t) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { uint64x1_t(simd_extract(a, 0)) } @@ -5271,6 +6725,10 @@ pub 
unsafe fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_low_p8(a: poly8x16_t) -> poly8x8_t { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -5284,6 +6742,10 @@ pub unsafe fn vget_low_p8(a: poly8x16_t) -> poly8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_low_p16(a: poly16x8_t) -> poly16x4_t { simd_shuffle!(a, a, [0, 1, 2, 3]) } @@ -5297,6 +6759,10 @@ pub unsafe fn vget_low_p16(a: poly16x8_t) -> poly16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vget_low_f32(a: float32x4_t) -> float32x2_t { simd_shuffle!(a, a, [0, 1]) } @@ -5311,6 +6777,10 @@ pub unsafe fn vget_low_f32(a: float32x4_t) -> float32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdupq_n_s8(value: i8) -> int8x16_t { int8x16_t( value, value, value, value, value, value, value, value, value, value, value, value, value, @@ -5328,6 +6798,10 @@ pub unsafe fn vdupq_n_s8(value: i8) -> int8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdupq_n_s16(value: i16) -> int16x8_t { int16x8_t(value, value, value, value, value, value, value, value) } @@ -5342,6 +6816,10 @@ pub unsafe fn vdupq_n_s16(value: i16) -> int16x8_t { not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdupq_n_s32(value: i32) -> int32x4_t { int32x4_t(value, value, value, value) } @@ -5356,6 +6834,10 @@ pub unsafe fn vdupq_n_s32(value: i32) -> int32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdupq_n_s64(value: i64) -> int64x2_t { int64x2_t(value, value) } @@ -5370,6 +6852,10 @@ pub unsafe fn vdupq_n_s64(value: i64) -> int64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdupq_n_u8(value: u8) -> uint8x16_t { uint8x16_t( value, value, value, value, value, value, value, value, value, value, value, value, value, @@ -5387,6 +6873,10 @@ pub unsafe fn vdupq_n_u8(value: u8) -> uint8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdupq_n_u16(value: u16) -> uint16x8_t { uint16x8_t(value, value, value, value, value, value, value, value) } @@ -5401,6 +6891,10 @@ pub unsafe fn vdupq_n_u16(value: u16) -> uint16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdupq_n_u32(value: u32) -> uint32x4_t { uint32x4_t(value, value, value, value) } @@ -5415,6 +6909,10 @@ pub unsafe fn vdupq_n_u32(value: u32) -> uint32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdupq_n_u64(value: u64) -> uint64x2_t { uint64x2_t(value, value) } @@ -5429,6 +6927,10 @@ pub unsafe fn vdupq_n_u64(value: u64) -> uint64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdupq_n_p8(value: p8) -> poly8x16_t { poly8x16_t( value, value, value, value, value, value, value, value, value, value, value, value, value, @@ -5446,6 +6948,10 @@ pub unsafe fn vdupq_n_p8(value: p8) -> poly8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdupq_n_p16(value: p16) -> poly16x8_t { poly16x8_t(value, value, value, value, value, value, value, value) } @@ -5460,6 +6966,10 @@ pub unsafe fn vdupq_n_p16(value: p16) -> poly16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdupq_n_f32(value: f32) -> float32x4_t { float32x4_t(value, value, value, value) } @@ -5491,6 +7001,10 @@ unsafe fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdup_n_s8(value: i8) -> int8x8_t { int8x8_t(value, value, value, value, value, value, value, value) } @@ -5505,6 +7019,10 @@ pub unsafe fn vdup_n_s8(value: i8) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe 
fn vdup_n_s16(value: i16) -> int16x4_t { int16x4_t(value, value, value, value) } @@ -5519,6 +7037,10 @@ pub unsafe fn vdup_n_s16(value: i16) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdup_n_s32(value: i32) -> int32x2_t { int32x2_t(value, value) } @@ -5533,6 +7055,10 @@ pub unsafe fn vdup_n_s32(value: i32) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdup_n_s64(value: i64) -> int64x1_t { int64x1_t(value) } @@ -5547,6 +7073,10 @@ pub unsafe fn vdup_n_s64(value: i64) -> int64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdup_n_u8(value: u8) -> uint8x8_t { uint8x8_t(value, value, value, value, value, value, value, value) } @@ -5561,6 +7091,10 @@ pub unsafe fn vdup_n_u8(value: u8) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdup_n_u16(value: u16) -> uint16x4_t { uint16x4_t(value, value, value, value) } @@ -5575,6 +7109,10 @@ pub unsafe fn vdup_n_u16(value: u16) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdup_n_u32(value: u32) -> uint32x2_t { uint32x2_t(value, value) } @@ -5589,6 +7127,10 @@ pub unsafe fn vdup_n_u32(value: u32) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", 
since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdup_n_u64(value: u64) -> uint64x1_t { uint64x1_t(value) } @@ -5603,6 +7145,10 @@ pub unsafe fn vdup_n_u64(value: u64) -> uint64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdup_n_p8(value: p8) -> poly8x8_t { poly8x8_t(value, value, value, value, value, value, value, value) } @@ -5617,6 +7163,10 @@ pub unsafe fn vdup_n_p8(value: p8) -> poly8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdup_n_p16(value: p16) -> poly16x4_t { poly16x4_t(value, value, value, value) } @@ -5631,6 +7181,10 @@ pub unsafe fn vdup_n_p16(value: p16) -> poly16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vdup_n_f32(value: f32) -> float32x2_t { float32x2_t(value, value) } @@ -5662,6 +7216,10 @@ unsafe fn vdup_n_f32_vfp4(value: f32) -> float32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vldrq_p128(a: *const p128) -> p128 { *a } @@ -5676,6 +7234,10 @@ pub unsafe fn vldrq_p128(a: *const p128) -> p128 { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vstrq_p128(a: *mut p128, b: p128) { *a = b; } @@ -5690,6 +7252,10 @@ pub unsafe fn 
vstrq_p128(a: *mut p128, b: p128) { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmov_n_s8(value: i8) -> int8x8_t { vdup_n_s8(value) } @@ -5704,6 +7270,10 @@ pub unsafe fn vmov_n_s8(value: i8) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmov_n_s16(value: i16) -> int16x4_t { vdup_n_s16(value) } @@ -5718,6 +7288,10 @@ pub unsafe fn vmov_n_s16(value: i16) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmov_n_s32(value: i32) -> int32x2_t { vdup_n_s32(value) } @@ -5732,6 +7306,10 @@ pub unsafe fn vmov_n_s32(value: i32) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmov_n_s64(value: i64) -> int64x1_t { vdup_n_s64(value) } @@ -5746,6 +7324,10 @@ pub unsafe fn vmov_n_s64(value: i64) -> int64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmov_n_u8(value: u8) -> uint8x8_t { vdup_n_u8(value) } @@ -5760,6 +7342,10 @@ pub unsafe fn vmov_n_u8(value: u8) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmov_n_u16(value: u16) -> uint16x4_t { vdup_n_u16(value) } @@ 
-5774,6 +7360,10 @@ pub unsafe fn vmov_n_u16(value: u16) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmov_n_u32(value: u32) -> uint32x2_t { vdup_n_u32(value) } @@ -5788,6 +7378,10 @@ pub unsafe fn vmov_n_u32(value: u32) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmov_n_u64(value: u64) -> uint64x1_t { vdup_n_u64(value) } @@ -5802,6 +7396,10 @@ pub unsafe fn vmov_n_u64(value: u64) -> uint64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmov_n_p8(value: p8) -> poly8x8_t { vdup_n_p8(value) } @@ -5816,6 +7414,10 @@ pub unsafe fn vmov_n_p8(value: p8) -> poly8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmov_n_p16(value: p16) -> poly16x4_t { vdup_n_p16(value) } @@ -5830,6 +7432,10 @@ pub unsafe fn vmov_n_p16(value: p16) -> poly16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmov_n_f32(value: f32) -> float32x2_t { vdup_n_f32(value) } @@ -5844,6 +7450,10 @@ pub unsafe fn vmov_n_f32(value: f32) -> float32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn 
vmovq_n_s8(value: i8) -> int8x16_t { vdupq_n_s8(value) } @@ -5858,6 +7468,10 @@ pub unsafe fn vmovq_n_s8(value: i8) -> int8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovq_n_s16(value: i16) -> int16x8_t { vdupq_n_s16(value) } @@ -5872,6 +7486,10 @@ pub unsafe fn vmovq_n_s16(value: i16) -> int16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovq_n_s32(value: i32) -> int32x4_t { vdupq_n_s32(value) } @@ -5886,6 +7504,10 @@ pub unsafe fn vmovq_n_s32(value: i32) -> int32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovq_n_s64(value: i64) -> int64x2_t { vdupq_n_s64(value) } @@ -5900,6 +7522,10 @@ pub unsafe fn vmovq_n_s64(value: i64) -> int64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovq_n_u8(value: u8) -> uint8x16_t { vdupq_n_u8(value) } @@ -5914,6 +7540,10 @@ pub unsafe fn vmovq_n_u8(value: u8) -> uint8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovq_n_u16(value: u16) -> uint16x8_t { vdupq_n_u16(value) } @@ -5928,6 +7558,10 @@ pub unsafe fn vmovq_n_u16(value: u16) -> uint16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovq_n_u32(value: u32) -> uint32x4_t { vdupq_n_u32(value) } @@ -5942,6 +7576,10 @@ pub unsafe fn vmovq_n_u32(value: u32) -> uint32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovq_n_u64(value: u64) -> uint64x2_t { vdupq_n_u64(value) } @@ -5956,6 +7594,10 @@ pub unsafe fn vmovq_n_u64(value: u64) -> uint64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovq_n_p8(value: p8) -> poly8x16_t { vdupq_n_p8(value) } @@ -5970,6 +7612,10 @@ pub unsafe fn vmovq_n_p8(value: p8) -> poly8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovq_n_p16(value: p16) -> poly16x8_t { vdupq_n_p16(value) } @@ -5984,6 +7630,10 @@ pub unsafe fn vmovq_n_p16(value: p16) -> poly16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vmovq_n_f32(value: f32) -> float32x4_t { vdupq_n_f32(value) } @@ -5999,6 +7649,10 @@ pub unsafe fn vmovq_n_f32(value: f32) -> float32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vext_s64(a: int64x1_t, _b: int64x1_t) -> int64x1_t { static_assert!(N == 0); a @@ -6015,6 +7669,10 @@ pub unsafe fn vext_s64(a: int64x1_t, _b: int64x1_t) -> int64x1_t { not(target_arch = "arm"), stable(feature 
= "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vext_u64(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_t { static_assert!(N == 0); a @@ -6030,6 +7688,10 @@ pub unsafe fn vext_u64(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_ not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { vcnt_s8_(a) } @@ -6043,6 +7705,10 @@ pub unsafe fn vcnt_s8(a: int8x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { vcntq_s8_(a) } @@ -6056,6 +7722,10 @@ pub unsafe fn vcntq_s8(a: int8x16_t) -> int8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { transmute(vcnt_s8_(transmute(a))) } @@ -6069,6 +7739,10 @@ pub unsafe fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { transmute(vcntq_s8_(transmute(a))) } @@ -6082,6 +7756,10 @@ pub unsafe fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { transmute(vcnt_s8_(transmute(a))) } @@ 
-6095,6 +7773,10 @@ pub unsafe fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { transmute(vcntq_s8_(transmute(a))) } @@ -6109,6 +7791,10 @@ pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev16_s8(a: int8x8_t) -> int8x8_t { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } @@ -6123,6 +7809,10 @@ pub unsafe fn vrev16_s8(a: int8x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev16q_s8(a: int8x16_t) -> int8x16_t { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } @@ -6137,6 +7827,10 @@ pub unsafe fn vrev16q_s8(a: int8x16_t) -> int8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev16_u8(a: uint8x8_t) -> uint8x8_t { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } @@ -6151,6 +7845,10 @@ pub unsafe fn vrev16_u8(a: uint8x8_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } @@ -6165,6 +7863,10 @@ pub unsafe fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t { not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev16_p8(a: poly8x8_t) -> poly8x8_t { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } @@ -6179,6 +7881,10 @@ pub unsafe fn vrev16_p8(a: poly8x8_t) -> poly8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) } @@ -6193,6 +7899,10 @@ pub unsafe fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev32_s8(a: int8x8_t) -> int8x8_t { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } @@ -6207,6 +7917,10 @@ pub unsafe fn vrev32_s8(a: int8x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev32q_s8(a: int8x16_t) -> int8x16_t { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } @@ -6221,6 +7935,10 @@ pub unsafe fn vrev32q_s8(a: int8x16_t) -> int8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev32_u8(a: uint8x8_t) -> uint8x8_t { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } @@ -6235,6 +7953,10 @@ pub unsafe fn vrev32_u8(a: uint8x8_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + 
target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } @@ -6249,6 +7971,10 @@ pub unsafe fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev32_s16(a: int16x4_t) -> int16x4_t { simd_shuffle!(a, a, [1, 0, 3, 2]) } @@ -6263,6 +7989,10 @@ pub unsafe fn vrev32_s16(a: int16x4_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev32q_s16(a: int16x8_t) -> int16x8_t { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } @@ -6277,6 +8007,10 @@ pub unsafe fn vrev32q_s16(a: int16x8_t) -> int16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev32_p16(a: poly16x4_t) -> poly16x4_t { simd_shuffle!(a, a, [1, 0, 3, 2]) } @@ -6291,6 +8025,10 @@ pub unsafe fn vrev32_p16(a: poly16x4_t) -> poly16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } @@ -6305,6 +8043,10 @@ pub unsafe fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn 
vrev32_u16(a: uint16x4_t) -> uint16x4_t { simd_shuffle!(a, a, [1, 0, 3, 2]) } @@ -6319,6 +8061,10 @@ pub unsafe fn vrev32_u16(a: uint16x4_t) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t { simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6]) } @@ -6333,6 +8079,10 @@ pub unsafe fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev32_p8(a: poly8x8_t) -> poly8x8_t { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } @@ -6347,6 +8097,10 @@ pub unsafe fn vrev32_p8(a: poly8x8_t) -> poly8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]) } @@ -6361,6 +8115,10 @@ pub unsafe fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64_s8(a: int8x8_t) -> int8x8_t { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -6375,6 +8133,10 @@ pub unsafe fn vrev64_s8(a: int8x8_t) -> int8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64q_s8(a: int8x16_t) -> int8x16_t { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) 
} @@ -6389,6 +8151,10 @@ pub unsafe fn vrev64q_s8(a: int8x16_t) -> int8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64_s16(a: int16x4_t) -> int16x4_t { simd_shuffle!(a, a, [3, 2, 1, 0]) } @@ -6403,6 +8169,10 @@ pub unsafe fn vrev64_s16(a: int16x4_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64q_s16(a: int16x8_t) -> int16x8_t { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } @@ -6417,6 +8187,10 @@ pub unsafe fn vrev64q_s16(a: int16x8_t) -> int16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64_s32(a: int32x2_t) -> int32x2_t { simd_shuffle!(a, a, [1, 0]) } @@ -6431,6 +8205,10 @@ pub unsafe fn vrev64_s32(a: int32x2_t) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64q_s32(a: int32x4_t) -> int32x4_t { simd_shuffle!(a, a, [1, 0, 3, 2]) } @@ -6445,6 +8223,10 @@ pub unsafe fn vrev64q_s32(a: int32x4_t) -> int32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64_u8(a: uint8x8_t) -> uint8x8_t { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -6459,6 +8241,10 @@ pub unsafe fn vrev64_u8(a: uint8x8_t) -> uint8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + 
target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } @@ -6473,6 +8259,10 @@ pub unsafe fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64_u16(a: uint16x4_t) -> uint16x4_t { simd_shuffle!(a, a, [3, 2, 1, 0]) } @@ -6487,6 +8277,10 @@ pub unsafe fn vrev64_u16(a: uint16x4_t) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } @@ -6501,6 +8295,10 @@ pub unsafe fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64_u32(a: uint32x2_t) -> uint32x2_t { simd_shuffle!(a, a, [1, 0]) } @@ -6515,6 +8313,10 @@ pub unsafe fn vrev64_u32(a: uint32x2_t) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t { simd_shuffle!(a, a, [1, 0, 3, 2]) } @@ -6529,6 +8331,10 @@ pub unsafe fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64_f32(a: 
float32x2_t) -> float32x2_t { simd_shuffle!(a, a, [1, 0]) } @@ -6543,6 +8349,10 @@ pub unsafe fn vrev64_f32(a: float32x2_t) -> float32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64q_f32(a: float32x4_t) -> float32x4_t { simd_shuffle!(a, a, [1, 0, 3, 2]) } @@ -6557,6 +8367,10 @@ pub unsafe fn vrev64q_f32(a: float32x4_t) -> float32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64_p8(a: poly8x8_t) -> poly8x8_t { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -6571,6 +8385,10 @@ pub unsafe fn vrev64_p8(a: poly8x8_t) -> poly8x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]) } @@ -6585,6 +8403,10 @@ pub unsafe fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64_p16(a: poly16x4_t) -> poly16x4_t { simd_shuffle!(a, a, [3, 2, 1, 0]) } @@ -6599,6 +8421,10 @@ pub unsafe fn vrev64_p16(a: poly16x4_t) -> poly16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t { simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4]) } @@ -6613,6 +8439,10 @@ pub unsafe fn 
vrev64q_p16(a: poly16x8_t) -> poly16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { #[cfg(target_arch = "arm")] { @@ -6634,6 +8464,10 @@ pub unsafe fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { #[cfg(target_arch = "arm")] { @@ -6655,6 +8489,10 @@ pub unsafe fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { #[cfg(target_arch = "arm")] { @@ -6676,6 +8514,10 @@ pub unsafe fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { #[cfg(target_arch = "arm")] { @@ -6697,6 +8539,10 @@ pub unsafe fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { #[cfg(target_arch = "arm")] { @@ -6718,6 +8564,10 @@ pub unsafe fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { #[cfg(target_arch = "arm")] { @@ -6739,6 +8589,10 @@ pub unsafe fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { #[cfg(target_arch = "arm")] { @@ -6760,6 +8614,10 @@ pub unsafe fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { #[cfg(target_arch = "arm")] { @@ -6781,6 +8639,10 @@ pub unsafe fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { #[cfg(target_arch = "arm")] { @@ -6802,6 +8664,10 @@ pub unsafe fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { #[cfg(target_arch = "arm")] { @@ -6823,6 +8689,10 @@ pub unsafe fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch 
= "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { #[cfg(target_arch = "arm")] { @@ -6844,6 +8714,10 @@ pub unsafe fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { #[cfg(target_arch = "arm")] { @@ -6862,6 +8736,14 @@ pub unsafe fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smmla))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_i8mm", issue = "117223") +)] pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6882,6 +8764,14 @@ pub unsafe fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ummla))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_i8mm", issue = "117223") +)] pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6902,6 +8792,14 @@ pub unsafe fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usmmla))] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] +#[cfg_attr( + not(target_arch = "arm"), + unstable(feature = "stdarch_neon_i8mm", issue = "117223") +)] pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6921,7 +8819,10 @@ pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4 #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] -pub unsafe fn vcombine_f16 ( low: float16x4_t, high: float16x4_t) -> float16x8_t { +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcombine_f16 ( low: float16x4_t, high: float16x4_t) -> float16x8_t { simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) } */ @@ -6931,7 +8832,14 @@ pub unsafe fn vcombine_f16 ( low: float16x4_t, high: float16x4_t) -> float16x8_ #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcombine_f32(low: float32x2_t, high: float32x2_t) -> float32x4_t { simd_shuffle!(low, high, [0, 1, 2, 3]) } @@ -6941,7 +8849,14 @@ pub unsafe fn vcombine_f32(low: float32x2_t, high: float32x2_t) -> float32x4_t { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] -#[stable(feature = "neon_intrinsics", 
since = "1.59.0")] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcombine_p8(low: poly8x8_t, high: poly8x8_t) -> poly8x16_t { simd_shuffle!( low, @@ -6955,7 +8870,14 @@ pub unsafe fn vcombine_p8(low: poly8x8_t, high: poly8x8_t) -> poly8x16_t { #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] +#[cfg_attr( + not(target_arch = "arm"), + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcombine_p16(low: poly16x4_t, high: poly16x4_t) -> poly16x8_t { simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -6966,9 +8888,13 @@ pub unsafe fn vcombine_p16(low: poly16x4_t, high: poly16x4_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] #[cfg_attr( - target_arch = "aarch64", + not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcombine_s8(low: int8x8_t, high: int8x8_t) -> int8x16_t { simd_shuffle!( low, @@ -6983,9 +8909,13 @@ pub unsafe fn vcombine_s8(low: int8x8_t, high: int8x8_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] #[cfg_attr( - target_arch = "aarch64", + not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcombine_s16(low: int16x4_t, high: int16x4_t) -> int16x8_t { 
simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -6996,9 +8926,13 @@ pub unsafe fn vcombine_s16(low: int16x4_t, high: int16x4_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] #[cfg_attr( - target_arch = "aarch64", + not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcombine_s32(low: int32x2_t, high: int32x2_t) -> int32x4_t { simd_shuffle!(low, high, [0, 1, 2, 3]) } @@ -7009,9 +8943,13 @@ pub unsafe fn vcombine_s32(low: int32x2_t, high: int32x2_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] #[cfg_attr( - target_arch = "aarch64", + not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcombine_s64(low: int64x1_t, high: int64x1_t) -> int64x2_t { simd_shuffle!(low, high, [0, 1]) } @@ -7022,9 +8960,13 @@ pub unsafe fn vcombine_s64(low: int64x1_t, high: int64x1_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] #[cfg_attr( - target_arch = "aarch64", + not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcombine_u8(low: uint8x8_t, high: uint8x8_t) -> uint8x16_t { simd_shuffle!( low, @@ -7039,9 +8981,13 @@ pub unsafe fn vcombine_u8(low: uint8x8_t, high: uint8x8_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] #[cfg_attr( - target_arch = "aarch64", + not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] 
+#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcombine_u16(low: uint16x4_t, high: uint16x4_t) -> uint16x8_t { simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -7053,9 +8999,13 @@ pub unsafe fn vcombine_u16(low: uint16x4_t, high: uint16x4_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov))] #[cfg_attr( - target_arch = "aarch64", + not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcombine_u32(low: uint32x2_t, high: uint32x2_t) -> uint32x4_t { simd_shuffle!(low, high, [0, 1, 2, 3]) } @@ -7066,9 +9016,13 @@ pub unsafe fn vcombine_u32(low: uint32x2_t, high: uint32x2_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] #[cfg_attr( - target_arch = "aarch64", + not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcombine_u64(low: uint64x1_t, high: uint64x1_t) -> uint64x2_t { simd_shuffle!(low, high, [0, 1]) } @@ -7079,9 +9033,13 @@ pub unsafe fn vcombine_u64(low: uint64x1_t, high: uint64x1_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] #[cfg_attr( - target_arch = "aarch64", + not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] +#[cfg_attr( + target_arch = "arm", + unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") +)] pub unsafe fn vcombine_p64(low: poly64x1_t, high: poly64x1_t) -> poly64x2_t { simd_shuffle!(low, high, [0, 1]) } diff --git a/crates/core_arch/src/lib.rs b/crates/core_arch/src/lib.rs index 
27dad8e241..ed301e208a 100644 --- a/crates/core_arch/src/lib.rs +++ b/crates/core_arch/src/lib.rs @@ -16,7 +16,6 @@ intrinsics, no_core, rustc_attrs, - stdsimd, staged_api, doc_cfg, tbm_target_feature, @@ -53,11 +52,15 @@ )] #![cfg_attr(test, allow(unused_imports))] #![no_std] -#![unstable(feature = "stdsimd", issue = "27731")] +#![stable(feature = "stdsimd", since = "1.27.0")] #![doc( test(attr(deny(warnings))), test(attr(allow(dead_code, deprecated, unused_variables, unused_mut))) )] +#![cfg_attr( + test, + feature(stdarch_arm_feature_detection, stdarch_powerpc_feature_detection) +)] #[cfg(test)] #[macro_use] @@ -68,8 +71,12 @@ extern crate std_detect; #[path = "mod.rs"] mod core_arch; +#[stable(feature = "stdsimd", since = "1.27.0")] pub mod arch { + #[stable(feature = "stdsimd", since = "1.27.0")] + #[allow(unused_imports)] pub use crate::core_arch::arch::*; + #[stable(feature = "stdsimd", since = "1.27.0")] pub use core::arch::asm; } diff --git a/crates/core_arch/src/mips/mod.rs b/crates/core_arch/src/mips/mod.rs index 96905aedc0..1de3ffd03d 100644 --- a/crates/core_arch/src/mips/mod.rs +++ b/crates/core_arch/src/mips/mod.rs @@ -5,6 +5,7 @@ #[cfg(target_feature = "fp64")] mod msa; #[cfg(target_feature = "fp64")] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub use self::msa::*; #[cfg(test)] @@ -13,6 +14,7 @@ use stdarch_test::assert_instr; /// Generates the trap instruction `BREAK` #[cfg_attr(test, assert_instr(break))] #[inline] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn break_() -> ! { crate::intrinsics::abort() } diff --git a/crates/core_arch/src/mips/msa.rs b/crates/core_arch/src/mips/msa.rs index 3e93db85e2..2e39f671d6 100644 --- a/crates/core_arch/src/mips/msa.rs +++ b/crates/core_arch/src/mips/msa.rs @@ -11,57 +11,67 @@ use stdarch_test::assert_instr; use crate::mem; types! { - // / MIPS-specific 128-bit wide vector of 16 packed `i8`. 
- pub struct v16i8( - i8, i8, i8, i8, i8, i8, i8, i8, - i8, i8, i8, i8, i8, i8, i8, i8, - ); - - // / MIPS-specific 128-bit wide vector of 8 packed `i16`. - pub struct v8i16( - i16, i16, i16, i16, i16, i16, i16, i16, - ); - - // / MIPS-specific 128-bit wide vector of 4 packed `i32`. - pub struct v4i32( - i32, i32, i32, i32, - ); - - // / MIPS-specific 128-bit wide vector of 2 packed `i64`. - pub struct v2i64( - i64, i64, - ); - - // / MIPS-specific 128-bit wide vector of 16 packed `u8`. - pub struct v16u8( - u8, u8, u8, u8, u8, u8, u8, u8, - u8, u8, u8, u8, u8, u8, u8, u8, - ); - - // / MIPS-specific 128-bit wide vector of 8 packed `u16`. - pub struct v8u16( - u16, u16, u16, u16, u16, u16, u16, u16, - ); - - // / MIPS-specific 128-bit wide vector of 4 packed `u32`. - pub struct v4u32( - u32, u32, u32, u32, - ); - - // / MIPS-specific 128-bit wide vector of 2 packed `u64`. - pub struct v2u64( - u64, u64, - ); - - // / MIPS-specific 128-bit wide vector of 4 packed `f32`. - pub struct v4f32( - f32, f32, f32, f32, - ); - - // / MIPS-specific 128-bit wide vector of 2 packed `f64`. - pub struct v2f64( - f64, f64, - ); + /// MIPS-specific 128-bit wide vector of 16 packed `i8`. + #[unstable(feature = "stdarch_mips", issue = "111198")] + pub struct v16i8( + i8, i8, i8, i8, i8, i8, i8, i8, + i8, i8, i8, i8, i8, i8, i8, i8, + ); + + /// MIPS-specific 128-bit wide vector of 8 packed `i16`. + #[unstable(feature = "stdarch_mips", issue = "111198")] + pub struct v8i16( + i16, i16, i16, i16, i16, i16, i16, i16, + ); + + /// MIPS-specific 128-bit wide vector of 4 packed `i32`. + #[unstable(feature = "stdarch_mips", issue = "111198")] + pub struct v4i32( + i32, i32, i32, i32, + ); + + /// MIPS-specific 128-bit wide vector of 2 packed `i64`. + #[unstable(feature = "stdarch_mips", issue = "111198")] + pub struct v2i64( + i64, i64, + ); + + /// MIPS-specific 128-bit wide vector of 16 packed `u8`. 
+ #[unstable(feature = "stdarch_mips", issue = "111198")] + pub struct v16u8( + u8, u8, u8, u8, u8, u8, u8, u8, + u8, u8, u8, u8, u8, u8, u8, u8, + ); + + /// MIPS-specific 128-bit wide vector of 8 packed `u16`. + #[unstable(feature = "stdarch_mips", issue = "111198")] + pub struct v8u16( + u16, u16, u16, u16, u16, u16, u16, u16, + ); + + /// MIPS-specific 128-bit wide vector of 4 packed `u32`. + #[unstable(feature = "stdarch_mips", issue = "111198")] + pub struct v4u32( + u32, u32, u32, u32, + ); + + /// MIPS-specific 128-bit wide vector of 2 packed `u64`. + #[unstable(feature = "stdarch_mips", issue = "111198")] + pub struct v2u64( + u64, u64, + ); + + /// MIPS-specific 128-bit wide vector of 4 packed `f32`. + #[unstable(feature = "stdarch_mips", issue = "111198")] + pub struct v4f32( + f32, f32, f32, f32, + ); + + /// MIPS-specific 128-bit wide vector of 2 packed `f64`. + #[unstable(feature = "stdarch_mips", issue = "111198")] + pub struct v2f64( + f64, f64, + ); } #[allow(improper_ctypes)] @@ -1140,6 +1150,7 @@ extern "C" { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(add_a.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_add_a_b(a: v16i8, b: v16i8) -> v16i8 { msa_add_a_b(a, mem::transmute(b)) } @@ -1153,6 +1164,7 @@ pub unsafe fn __msa_add_a_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(add_a.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_add_a_h(a: v8i16, b: v8i16) -> v8i16 { msa_add_a_h(a, mem::transmute(b)) } @@ -1166,6 +1178,7 @@ pub unsafe fn __msa_add_a_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(add_a.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_add_a_w(a: v4i32, b: v4i32) -> v4i32 { msa_add_a_w(a, mem::transmute(b)) } @@ -1179,6 +1192,7 @@ pub unsafe fn __msa_add_a_w(a: v4i32, b: v4i32) -> v4i32 { 
#[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(add_a.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_add_a_d(a: v2i64, b: v2i64) -> v2i64 { msa_add_a_d(a, mem::transmute(b)) } @@ -1192,6 +1206,7 @@ pub unsafe fn __msa_add_a_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_a.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_adds_a_b(a: v16i8, b: v16i8) -> v16i8 { msa_adds_a_b(a, mem::transmute(b)) } @@ -1205,6 +1220,7 @@ pub unsafe fn __msa_adds_a_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_a.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_adds_a_h(a: v8i16, b: v8i16) -> v8i16 { msa_adds_a_h(a, mem::transmute(b)) } @@ -1218,6 +1234,7 @@ pub unsafe fn __msa_adds_a_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_a.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_adds_a_w(a: v4i32, b: v4i32) -> v4i32 { msa_adds_a_w(a, mem::transmute(b)) } @@ -1231,6 +1248,7 @@ pub unsafe fn __msa_adds_a_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_a.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_adds_a_d(a: v2i64, b: v2i64) -> v2i64 { msa_adds_a_d(a, mem::transmute(b)) } @@ -1245,6 +1263,7 @@ pub unsafe fn __msa_adds_a_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_s.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_adds_s_b(a: v16i8, b: v16i8) -> v16i8 { msa_adds_s_b(a, mem::transmute(b)) } @@ -1259,6 +1278,7 @@ pub unsafe fn __msa_adds_s_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, 
assert_instr(adds_s.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_adds_s_h(a: v8i16, b: v8i16) -> v8i16 { msa_adds_s_h(a, mem::transmute(b)) } @@ -1273,6 +1293,7 @@ pub unsafe fn __msa_adds_s_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_adds_s_w(a: v4i32, b: v4i32) -> v4i32 { msa_adds_s_w(a, mem::transmute(b)) } @@ -1287,6 +1308,7 @@ pub unsafe fn __msa_adds_s_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_adds_s_d(a: v2i64, b: v2i64) -> v2i64 { msa_adds_s_d(a, mem::transmute(b)) } @@ -1301,6 +1323,7 @@ pub unsafe fn __msa_adds_s_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_u.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_adds_u_b(a: v16u8, b: v16u8) -> v16u8 { msa_adds_u_b(a, mem::transmute(b)) } @@ -1315,6 +1338,7 @@ pub unsafe fn __msa_adds_u_b(a: v16u8, b: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_adds_u_h(a: v8u16, b: v8u16) -> v8u16 { msa_adds_u_h(a, mem::transmute(b)) } @@ -1329,6 +1353,7 @@ pub unsafe fn __msa_adds_u_h(a: v8u16, b: v8u16) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_adds_u_w(a: v4u32, b: v4u32) -> v4u32 { msa_adds_u_w(a, mem::transmute(b)) } @@ -1343,6 +1368,7 @@ pub unsafe fn __msa_adds_u_w(a: v4u32, b: v4u32) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(adds_u.d))] +#[unstable(feature = "stdarch_mips", 
issue = "111198")] pub unsafe fn __msa_adds_u_d(a: v2u64, b: v2u64) -> v2u64 { msa_adds_u_d(a, mem::transmute(b)) } @@ -1356,6 +1382,7 @@ pub unsafe fn __msa_adds_u_d(a: v2u64, b: v2u64) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(addv.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_addv_b(a: v16i8, b: v16i8) -> v16i8 { msa_addv_b(a, mem::transmute(b)) } @@ -1369,6 +1396,7 @@ pub unsafe fn __msa_addv_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(addv.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_addv_h(a: v8i16, b: v8i16) -> v8i16 { msa_addv_h(a, mem::transmute(b)) } @@ -1382,6 +1410,7 @@ pub unsafe fn __msa_addv_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(addv.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_addv_w(a: v4i32, b: v4i32) -> v4i32 { msa_addv_w(a, mem::transmute(b)) } @@ -1395,6 +1424,7 @@ pub unsafe fn __msa_addv_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(addv.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_addv_d(a: v2i64, b: v2i64) -> v2i64 { msa_addv_d(a, mem::transmute(b)) } @@ -1409,6 +1439,7 @@ pub unsafe fn __msa_addv_d(a: v2i64, b: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(addvi.b, imm5 = 0b10111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_addvi_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM5, 5); msa_addvi_b(a, IMM5) @@ -1424,6 +1455,7 @@ pub unsafe fn __msa_addvi_b(a: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(addvi.h, imm5 = 0b10111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub 
unsafe fn __msa_addvi_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); msa_addvi_h(a, IMM5) @@ -1439,6 +1471,7 @@ pub unsafe fn __msa_addvi_h(a: v8i16) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(addvi.w, imm5 = 0b10111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_addvi_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); msa_addvi_w(a, IMM5) @@ -1454,6 +1487,7 @@ pub unsafe fn __msa_addvi_w(a: v4i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(addvi.d, imm5 = 0b10111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_addvi_d(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM5, 5); msa_addvi_d(a, IMM5) @@ -1469,6 +1503,7 @@ pub unsafe fn __msa_addvi_d(a: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(and.v))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_and_v(a: v16u8, b: v16u8) -> v16u8 { msa_and_v(a, mem::transmute(b)) } @@ -1483,6 +1518,7 @@ pub unsafe fn __msa_and_v(a: v16u8, b: v16u8) -> v16u8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(andi.b, imm8 = 0b10010111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_andi_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM8, 8); msa_andi_b(a, IMM8) @@ -1497,6 +1533,7 @@ pub unsafe fn __msa_andi_b(a: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(asub_s.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_asub_s_b(a: v16i8, b: v16i8) -> v16i8 { msa_asub_s_b(a, mem::transmute(b)) } @@ -1510,6 +1547,7 @@ pub unsafe fn __msa_asub_s_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(asub_s.h))] +#[unstable(feature = 
"stdarch_mips", issue = "111198")] pub unsafe fn __msa_asub_s_h(a: v8i16, b: v8i16) -> v8i16 { msa_asub_s_h(a, mem::transmute(b)) } @@ -1523,6 +1561,7 @@ pub unsafe fn __msa_asub_s_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(asub_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_asub_s_w(a: v4i32, b: v4i32) -> v4i32 { msa_asub_s_w(a, mem::transmute(b)) } @@ -1536,6 +1575,7 @@ pub unsafe fn __msa_asub_s_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(asub_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_asub_s_d(a: v2i64, b: v2i64) -> v2i64 { msa_asub_s_d(a, mem::transmute(b)) } @@ -1549,6 +1589,7 @@ pub unsafe fn __msa_asub_s_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(asub_u.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_asub_u_b(a: v16u8, b: v16u8) -> v16u8 { msa_asub_u_b(a, mem::transmute(b)) } @@ -1562,6 +1603,7 @@ pub unsafe fn __msa_asub_u_b(a: v16u8, b: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(asub_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_asub_u_h(a: v8u16, b: v8u16) -> v8u16 { msa_asub_u_h(a, mem::transmute(b)) } @@ -1575,6 +1617,7 @@ pub unsafe fn __msa_asub_u_h(a: v8u16, b: v8u16) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(asub_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_asub_u_w(a: v4u32, b: v4u32) -> v4u32 { msa_asub_u_w(a, mem::transmute(b)) } @@ -1588,6 +1631,7 @@ pub unsafe fn __msa_asub_u_w(a: v4u32, b: v4u32) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(asub_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_asub_u_d(a: 
v2u64, b: v2u64) -> v2u64 { msa_asub_u_d(a, mem::transmute(b)) } @@ -1603,6 +1647,7 @@ pub unsafe fn __msa_asub_u_d(a: v2u64, b: v2u64) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ave_s.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ave_s_b(a: v16i8, b: v16i8) -> v16i8 { msa_ave_s_b(a, mem::transmute(b)) } @@ -1618,6 +1663,7 @@ pub unsafe fn __msa_ave_s_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ave_s.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ave_s_h(a: v8i16, b: v8i16) -> v8i16 { msa_ave_s_h(a, mem::transmute(b)) } @@ -1633,6 +1679,7 @@ pub unsafe fn __msa_ave_s_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ave_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ave_s_w(a: v4i32, b: v4i32) -> v4i32 { msa_ave_s_w(a, mem::transmute(b)) } @@ -1648,6 +1695,7 @@ pub unsafe fn __msa_ave_s_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ave_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ave_s_d(a: v2i64, b: v2i64) -> v2i64 { msa_ave_s_d(a, mem::transmute(b)) } @@ -1663,6 +1711,7 @@ pub unsafe fn __msa_ave_s_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ave_u.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ave_u_b(a: v16u8, b: v16u8) -> v16u8 { msa_ave_u_b(a, mem::transmute(b)) } @@ -1678,6 +1727,7 @@ pub unsafe fn __msa_ave_u_b(a: v16u8, b: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ave_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ave_u_h(a: v8u16, b: v8u16) -> v8u16 { msa_ave_u_h(a, mem::transmute(b)) } @@ -1693,6 +1743,7 @@ pub 
unsafe fn __msa_ave_u_h(a: v8u16, b: v8u16) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ave_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ave_u_w(a: v4u32, b: v4u32) -> v4u32 { msa_ave_u_w(a, mem::transmute(b)) } @@ -1708,6 +1759,7 @@ pub unsafe fn __msa_ave_u_w(a: v4u32, b: v4u32) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ave_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ave_u_d(a: v2u64, b: v2u64) -> v2u64 { msa_ave_u_d(a, mem::transmute(b)) } @@ -1724,6 +1776,7 @@ pub unsafe fn __msa_ave_u_d(a: v2u64, b: v2u64) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(aver_s.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_aver_s_b(a: v16i8, b: v16i8) -> v16i8 { msa_aver_s_b(a, mem::transmute(b)) } @@ -1740,6 +1793,7 @@ pub unsafe fn __msa_aver_s_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(aver_s.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_aver_s_h(a: v8i16, b: v8i16) -> v8i16 { msa_aver_s_h(a, mem::transmute(b)) } @@ -1756,6 +1810,7 @@ pub unsafe fn __msa_aver_s_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(aver_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_aver_s_w(a: v4i32, b: v4i32) -> v4i32 { msa_aver_s_w(a, mem::transmute(b)) } @@ -1772,6 +1827,7 @@ pub unsafe fn __msa_aver_s_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(aver_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_aver_s_d(a: v2i64, b: v2i64) -> v2i64 { msa_aver_s_d(a, mem::transmute(b)) } @@ -1788,6 +1844,7 @@ pub unsafe fn __msa_aver_s_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] 
#[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(aver_u.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_aver_u_b(a: v16u8, b: v16u8) -> v16u8 { msa_aver_u_b(a, mem::transmute(b)) } @@ -1804,6 +1861,7 @@ pub unsafe fn __msa_aver_u_b(a: v16u8, b: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(aver_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_aver_u_h(a: v8u16, b: v8u16) -> v8u16 { msa_aver_u_h(a, mem::transmute(b)) } @@ -1820,6 +1878,7 @@ pub unsafe fn __msa_aver_u_h(a: v8u16, b: v8u16) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(aver_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_aver_u_w(a: v4u32, b: v4u32) -> v4u32 { msa_aver_u_w(a, mem::transmute(b)) } @@ -1836,6 +1895,7 @@ pub unsafe fn __msa_aver_u_w(a: v4u32, b: v4u32) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(aver_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_aver_u_d(a: v2u64, b: v2u64) -> v2u64 { msa_aver_u_d(a, mem::transmute(b)) } @@ -1850,6 +1910,7 @@ pub unsafe fn __msa_aver_u_d(a: v2u64, b: v2u64) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bclr.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bclr_b(a: v16u8, b: v16u8) -> v16u8 { msa_bclr_b(a, mem::transmute(b)) } @@ -1864,6 +1925,7 @@ pub unsafe fn __msa_bclr_b(a: v16u8, b: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bclr.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bclr_h(a: v8u16, b: v8u16) -> v8u16 { msa_bclr_h(a, mem::transmute(b)) } @@ -1878,6 +1940,7 @@ pub unsafe fn __msa_bclr_h(a: v8u16, b: v8u16) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bclr.w))] 
+#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bclr_w(a: v4u32, b: v4u32) -> v4u32 { msa_bclr_w(a, mem::transmute(b)) } @@ -1892,6 +1955,7 @@ pub unsafe fn __msa_bclr_w(a: v4u32, b: v4u32) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bclr.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bclr_d(a: v2u64, b: v2u64) -> v2u64 { msa_bclr_d(a, mem::transmute(b)) } @@ -1906,6 +1970,7 @@ pub unsafe fn __msa_bclr_d(a: v2u64, b: v2u64) -> v2u64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bclri.b, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bclri_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM3, 3); msa_bclri_b(a, IMM3) @@ -1921,6 +1986,7 @@ pub unsafe fn __msa_bclri_b(a: v16u8) -> v16u8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bclri.h, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bclri_h(a: v8u16) -> v8u16 { static_assert_uimm_bits!(IMM4, 4); msa_bclri_h(a, IMM4) @@ -1936,6 +2002,7 @@ pub unsafe fn __msa_bclri_h(a: v8u16) -> v8u16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bclri.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bclri_w(a: v4u32) -> v4u32 { static_assert_uimm_bits!(IMM5, 5); msa_bclri_w(a, IMM5) @@ -1951,6 +2018,7 @@ pub unsafe fn __msa_bclri_w(a: v4u32) -> v4u32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bclri.d, imm6 = 0b111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bclri_d(a: v2u64) -> v2u64 { static_assert_uimm_bits!(IMM6, 6); msa_bclri_d(a, IMM6) @@ -1966,6 +2034,7 @@ pub unsafe fn __msa_bclri_d(a: v2u64) -> v2u64 { #[inline] 
#[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsl.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsl_b(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { msa_binsl_b(a, mem::transmute(b), c) } @@ -1980,6 +2049,7 @@ pub unsafe fn __msa_binsl_b(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsl.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsl_h(a: v8u16, b: v8u16, c: v8u16) -> v8u16 { msa_binsl_h(a, mem::transmute(b), c) } @@ -1994,6 +2064,7 @@ pub unsafe fn __msa_binsl_h(a: v8u16, b: v8u16, c: v8u16) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsl.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsl_w(a: v4u32, b: v4u32, c: v4u32) -> v4u32 { msa_binsl_w(a, mem::transmute(b), c) } @@ -2008,6 +2079,7 @@ pub unsafe fn __msa_binsl_w(a: v4u32, b: v4u32, c: v4u32) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsl.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsl_d(a: v2u64, b: v2u64, c: v2u64) -> v2u64 { msa_binsl_d(a, mem::transmute(b), c) } @@ -2022,6 +2094,7 @@ pub unsafe fn __msa_binsl_d(a: v2u64, b: v2u64, c: v2u64) -> v2u64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsli.b, imm3 = 0b111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsli_b(a: v16u8, b: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM3, 3); msa_binsli_b(a, mem::transmute(b), IMM3) @@ -2037,6 +2110,7 @@ pub unsafe fn __msa_binsli_b(a: v16u8, b: v16u8) -> v16u8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsli.h, imm4 = 0b1111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsli_h(a: v8u16, b: v8u16) -> v8u16 { 
static_assert_uimm_bits!(IMM4, 4); msa_binsli_h(a, mem::transmute(b), IMM4) @@ -2052,6 +2126,7 @@ pub unsafe fn __msa_binsli_h(a: v8u16, b: v8u16) -> v8u16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsli.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsli_w(a: v4u32, b: v4u32) -> v4u32 { static_assert_uimm_bits!(IMM5, 5); msa_binsli_w(a, mem::transmute(b), IMM5) @@ -2067,6 +2142,7 @@ pub unsafe fn __msa_binsli_w(a: v4u32, b: v4u32) -> v4u32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsli.d, imm6 = 0b111111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsli_d(a: v2u64, b: v2u64) -> v2u64 { static_assert_uimm_bits!(IMM6, 6); msa_binsli_d(a, mem::transmute(b), IMM6) @@ -2082,6 +2158,7 @@ pub unsafe fn __msa_binsli_d(a: v2u64, b: v2u64) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsr.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsr_b(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { msa_binsr_b(a, mem::transmute(b), c) } @@ -2096,6 +2173,7 @@ pub unsafe fn __msa_binsr_b(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsr.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsr_h(a: v8u16, b: v8u16, c: v8u16) -> v8u16 { msa_binsr_h(a, mem::transmute(b), c) } @@ -2110,6 +2188,7 @@ pub unsafe fn __msa_binsr_h(a: v8u16, b: v8u16, c: v8u16) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsr.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsr_w(a: v4u32, b: v4u32, c: v4u32) -> v4u32 { msa_binsr_w(a, mem::transmute(b), c) } @@ -2124,6 +2203,7 @@ pub unsafe fn __msa_binsr_w(a: v4u32, b: v4u32, c: v4u32) -> v4u32 { #[inline] 
#[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsr.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsr_d(a: v2u64, b: v2u64, c: v2u64) -> v2u64 { msa_binsr_d(a, mem::transmute(b), c) } @@ -2138,6 +2218,7 @@ pub unsafe fn __msa_binsr_d(a: v2u64, b: v2u64, c: v2u64) -> v2u64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsri.b, imm3 = 0b111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsri_b(a: v16u8, b: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM3, 3); msa_binsri_b(a, mem::transmute(b), IMM3) @@ -2153,6 +2234,7 @@ pub unsafe fn __msa_binsri_b(a: v16u8, b: v16u8) -> v16u8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsri.h, imm4 = 0b1111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsri_h(a: v8u16, b: v8u16) -> v8u16 { static_assert_uimm_bits!(IMM4, 4); msa_binsri_h(a, mem::transmute(b), IMM4) @@ -2168,6 +2250,7 @@ pub unsafe fn __msa_binsri_h(a: v8u16, b: v8u16) -> v8u16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsri.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsri_w(a: v4u32, b: v4u32) -> v4u32 { static_assert_uimm_bits!(IMM5, 5); msa_binsri_w(a, mem::transmute(b), IMM5) @@ -2183,6 +2266,7 @@ pub unsafe fn __msa_binsri_w(a: v4u32, b: v4u32) -> v4u32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(binsri.d, imm6 = 0b111111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_binsri_d(a: v2u64, b: v2u64) -> v2u64 { static_assert_uimm_bits!(IMM6, 6); msa_binsri_d(a, mem::transmute(b), IMM6) @@ -2198,6 +2282,7 @@ pub unsafe fn __msa_binsri_d(a: v2u64, b: v2u64) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, 
assert_instr(bmnz.v))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bmnz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { msa_bmnz_v(a, mem::transmute(b), c) } @@ -2212,6 +2297,7 @@ pub unsafe fn __msa_bmnz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bmnzi.b, imm8 = 0b11111111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bmnzi_b(a: v16u8, b: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM8, 8); msa_bmnzi_b(a, mem::transmute(b), IMM8) @@ -2227,6 +2313,7 @@ pub unsafe fn __msa_bmnzi_b(a: v16u8, b: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bmz.v))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bmz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { msa_bmz_v(a, mem::transmute(b), c) } @@ -2241,6 +2328,7 @@ pub unsafe fn __msa_bmz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bmzi.b, imm8 = 0b11111111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bmzi_b(a: v16u8, b: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM8, 8); msa_bmzi_b(a, mem::transmute(b), IMM8) @@ -2256,6 +2344,7 @@ pub unsafe fn __msa_bmzi_b(a: v16u8, b: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bneg.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bneg_b(a: v16u8, b: v16u8) -> v16u8 { msa_bneg_b(a, mem::transmute(b)) } @@ -2270,6 +2359,7 @@ pub unsafe fn __msa_bneg_b(a: v16u8, b: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bneg.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bneg_h(a: v8u16, b: v8u16) -> v8u16 { msa_bneg_h(a, mem::transmute(b)) } @@ -2284,6 +2374,7 @@ pub unsafe fn 
__msa_bneg_h(a: v8u16, b: v8u16) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bneg.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bneg_w(a: v4u32, b: v4u32) -> v4u32 { msa_bneg_w(a, mem::transmute(b)) } @@ -2298,6 +2389,7 @@ pub unsafe fn __msa_bneg_w(a: v4u32, b: v4u32) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bneg.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bneg_d(a: v2u64, b: v2u64) -> v2u64 { msa_bneg_d(a, mem::transmute(b)) } @@ -2312,6 +2404,7 @@ pub unsafe fn __msa_bneg_d(a: v2u64, b: v2u64) -> v2u64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnegi.b, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bnegi_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM3, 3); msa_bnegi_b(a, IMM3) @@ -2327,6 +2420,7 @@ pub unsafe fn __msa_bnegi_b(a: v16u8) -> v16u8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnegi.h, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bnegi_h(a: v8u16) -> v8u16 { static_assert_uimm_bits!(IMM4, 4); msa_bnegi_h(a, IMM4) @@ -2342,6 +2436,7 @@ pub unsafe fn __msa_bnegi_h(a: v8u16) -> v8u16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnegi.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bnegi_w(a: v4u32) -> v4u32 { static_assert_uimm_bits!(IMM5, 5); msa_bnegi_w(a, IMM5) @@ -2357,6 +2452,7 @@ pub unsafe fn __msa_bnegi_w(a: v4u32) -> v4u32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnegi.d, imm6 = 0b111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bnegi_d(a: v2u64) -> v2u64 { 
static_assert_uimm_bits!(IMM6, 6); msa_bnegi_d(a, IMM6) @@ -2369,6 +2465,7 @@ pub unsafe fn __msa_bnegi_d(a: v2u64) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnz.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bnz_b(a: v16u8) -> i32 { msa_bnz_b(a) } @@ -2380,6 +2477,7 @@ pub unsafe fn __msa_bnz_b(a: v16u8) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnz.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bnz_h(a: v8u16) -> i32 { msa_bnz_h(a) } @@ -2391,6 +2489,7 @@ pub unsafe fn __msa_bnz_h(a: v8u16) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnz.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bnz_w(a: v4u32) -> i32 { msa_bnz_w(a) } @@ -2402,6 +2501,7 @@ pub unsafe fn __msa_bnz_w(a: v4u32) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnz.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bnz_d(a: v2u64) -> i32 { msa_bnz_d(a) } @@ -2414,6 +2514,7 @@ pub unsafe fn __msa_bnz_d(a: v2u64) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bnz.v))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bnz_v(a: v16u8) -> i32 { msa_bnz_v(a) } @@ -2428,6 +2529,7 @@ pub unsafe fn __msa_bnz_v(a: v16u8) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bsel.v))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bsel_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { msa_bsel_v(a, mem::transmute(b), c) } @@ -2442,6 +2544,7 @@ pub unsafe fn __msa_bsel_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bseli.b, imm8 = 0b11111111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn 
__msa_bseli_b(a: v16u8, b: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM8, 8); msa_bseli_b(a, mem::transmute(b), IMM8) @@ -2457,6 +2560,7 @@ pub unsafe fn __msa_bseli_b(a: v16u8, b: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bset.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bset_b(a: v16u8, b: v16u8) -> v16u8 { msa_bset_b(a, mem::transmute(b)) } @@ -2471,6 +2575,7 @@ pub unsafe fn __msa_bset_b(a: v16u8, b: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bset.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bset_h(a: v8u16, b: v8u16) -> v8u16 { msa_bset_h(a, mem::transmute(b)) } @@ -2485,6 +2590,7 @@ pub unsafe fn __msa_bset_h(a: v8u16, b: v8u16) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bset.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bset_w(a: v4u32, b: v4u32) -> v4u32 { msa_bset_w(a, mem::transmute(b)) } @@ -2499,6 +2605,7 @@ pub unsafe fn __msa_bset_w(a: v4u32, b: v4u32) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bset.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bset_d(a: v2u64, b: v2u64) -> v2u64 { msa_bset_d(a, mem::transmute(b)) } @@ -2513,6 +2620,7 @@ pub unsafe fn __msa_bset_d(a: v2u64, b: v2u64) -> v2u64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bseti.b, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bseti_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM3, 3); msa_bseti_b(a, IMM3) @@ -2528,6 +2636,7 @@ pub unsafe fn __msa_bseti_b(a: v16u8) -> v16u8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bseti.h, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub 
unsafe fn __msa_bseti_h(a: v8u16) -> v8u16 { static_assert_uimm_bits!(IMM4, 4); msa_bseti_h(a, IMM4) @@ -2543,6 +2652,7 @@ pub unsafe fn __msa_bseti_h(a: v8u16) -> v8u16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bseti.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bseti_w(a: v4u32) -> v4u32 { static_assert_uimm_bits!(IMM5, 5); msa_bseti_w(a, IMM5) @@ -2558,6 +2668,7 @@ pub unsafe fn __msa_bseti_w(a: v4u32) -> v4u32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bseti.d, imm6 = 0b111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bseti_d(a: v2u64) -> v2u64 { static_assert_uimm_bits!(IMM6, 6); msa_bseti_d(a, IMM6) @@ -2570,6 +2681,7 @@ pub unsafe fn __msa_bseti_d(a: v2u64) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bz.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bz_b(a: v16u8) -> i32 { msa_bz_b(a) } @@ -2581,6 +2693,7 @@ pub unsafe fn __msa_bz_b(a: v16u8) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bz.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bz_h(a: v8u16) -> i32 { msa_bz_h(a) } @@ -2592,6 +2705,7 @@ pub unsafe fn __msa_bz_h(a: v8u16) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bz.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bz_w(a: v4u32) -> i32 { msa_bz_w(a) } @@ -2603,6 +2717,7 @@ pub unsafe fn __msa_bz_w(a: v4u32) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(bz.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bz_d(a: v2u64) -> i32 { msa_bz_d(a) } @@ -2615,6 +2730,7 @@ pub unsafe fn __msa_bz_d(a: v2u64) -> i32 { #[inline] #[target_feature(enable = "msa")] 
#[cfg_attr(test, assert_instr(bz.v))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_bz_v(a: v16u8) -> i32 { msa_bz_v(a) } @@ -2628,6 +2744,7 @@ pub unsafe fn __msa_bz_v(a: v16u8) -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ceq.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ceq_b(a: v16i8, b: v16i8) -> v16i8 { msa_ceq_b(a, mem::transmute(b)) } @@ -2641,6 +2758,7 @@ pub unsafe fn __msa_ceq_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ceq.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ceq_h(a: v8i16, b: v8i16) -> v8i16 { msa_ceq_h(a, mem::transmute(b)) } @@ -2654,6 +2772,7 @@ pub unsafe fn __msa_ceq_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ceq.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ceq_w(a: v4i32, b: v4i32) -> v4i32 { msa_ceq_w(a, mem::transmute(b)) } @@ -2667,6 +2786,7 @@ pub unsafe fn __msa_ceq_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ceq.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ceq_d(a: v2i64, b: v2i64) -> v2i64 { msa_ceq_d(a, mem::transmute(b)) } @@ -2681,6 +2801,7 @@ pub unsafe fn __msa_ceq_d(a: v2i64, b: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ceqi.b, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ceqi_b(a: v16i8) -> v16i8 { static_assert_simm_bits!(IMM_S5, 5); msa_ceqi_b(a, IMM_S5) @@ -2696,6 +2817,7 @@ pub unsafe fn __msa_ceqi_b(a: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ceqi.h, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = 
"111198")] pub unsafe fn __msa_ceqi_h(a: v8i16) -> v8i16 { static_assert_simm_bits!(IMM_S5, 5); msa_ceqi_h(a, IMM_S5) @@ -2711,6 +2833,7 @@ pub unsafe fn __msa_ceqi_h(a: v8i16) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ceqi.w, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ceqi_w(a: v4i32) -> v4i32 { static_assert_simm_bits!(IMM_S5, 5); msa_ceqi_w(a, IMM_S5) @@ -2726,6 +2849,7 @@ pub unsafe fn __msa_ceqi_w(a: v4i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ceqi.d, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ceqi_d(a: v2i64) -> v2i64 { static_assert_simm_bits!(IMM_S5, 5); msa_ceqi_d(a, IMM_S5) @@ -2740,6 +2864,7 @@ pub unsafe fn __msa_ceqi_d(a: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cfcmsa, imm5 = 0b11111))] #[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_cfcmsa() -> i32 { static_assert_uimm_bits!(IMM5, 5); msa_cfcmsa(IMM5) @@ -2755,6 +2880,7 @@ pub unsafe fn __msa_cfcmsa() -> i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cle_s.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_cle_s_b(a: v16i8, b: v16i8) -> v16i8 { msa_cle_s_b(a, mem::transmute(b)) } @@ -2769,6 +2895,7 @@ pub unsafe fn __msa_cle_s_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cle_s.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_cle_s_h(a: v8i16, b: v8i16) -> v8i16 { msa_cle_s_h(a, mem::transmute(b)) } @@ -2783,6 +2910,7 @@ pub unsafe fn __msa_cle_s_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cle_s.w))] +#[unstable(feature = "stdarch_mips", 
issue = "111198")] pub unsafe fn __msa_cle_s_w(a: v4i32, b: v4i32) -> v4i32 { msa_cle_s_w(a, mem::transmute(b)) } @@ -2797,6 +2925,7 @@ pub unsafe fn __msa_cle_s_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cle_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_cle_s_d(a: v2i64, b: v2i64) -> v2i64 { msa_cle_s_d(a, mem::transmute(b)) } @@ -2811,6 +2940,7 @@ pub unsafe fn __msa_cle_s_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cle_u.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_cle_u_b(a: v16u8, b: v16u8) -> v16i8 { msa_cle_u_b(a, mem::transmute(b)) } @@ -2825,6 +2955,7 @@ pub unsafe fn __msa_cle_u_b(a: v16u8, b: v16u8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cle_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_cle_u_h(a: v8u16, b: v8u16) -> v8i16 { msa_cle_u_h(a, mem::transmute(b)) } @@ -2839,6 +2970,7 @@ pub unsafe fn __msa_cle_u_h(a: v8u16, b: v8u16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cle_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_cle_u_w(a: v4u32, b: v4u32) -> v4i32 { msa_cle_u_w(a, mem::transmute(b)) } @@ -2853,6 +2985,7 @@ pub unsafe fn __msa_cle_u_w(a: v4u32, b: v4u32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(cle_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_cle_u_d(a: v2u64, b: v2u64) -> v2i64 { msa_cle_u_d(a, mem::transmute(b)) } @@ -2868,6 +3001,7 @@ pub unsafe fn __msa_cle_u_d(a: v2u64, b: v2u64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clei_s.b, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn 
__msa_clei_s_b(a: v16i8) -> v16i8 { static_assert_simm_bits!(IMM_S5, 5); msa_clei_s_b(a, IMM_S5) @@ -2884,6 +3018,7 @@ pub unsafe fn __msa_clei_s_b(a: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clei_s.h, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clei_s_h(a: v8i16) -> v8i16 { static_assert_simm_bits!(IMM_S5, 5); msa_clei_s_h(a, IMM_S5) @@ -2900,6 +3035,7 @@ pub unsafe fn __msa_clei_s_h(a: v8i16) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clei_s.w, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clei_s_w(a: v4i32) -> v4i32 { static_assert_simm_bits!(IMM_S5, 5); msa_clei_s_w(a, IMM_S5) @@ -2916,6 +3052,7 @@ pub unsafe fn __msa_clei_s_w(a: v4i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clei_s.d, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clei_s_d(a: v2i64) -> v2i64 { static_assert_simm_bits!(IMM_S5, 5); msa_clei_s_d(a, IMM_S5) @@ -2932,6 +3069,7 @@ pub unsafe fn __msa_clei_s_d(a: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clei_u.b, imm5 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clei_u_b(a: v16u8) -> v16i8 { static_assert_uimm_bits!(IMM5, 5); msa_clei_u_b(a, IMM5) @@ -2948,6 +3086,7 @@ pub unsafe fn __msa_clei_u_b(a: v16u8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clei_u.h, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clei_u_h(a: v8u16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); msa_clei_u_h(a, IMM5) @@ -2964,6 +3103,7 @@ pub unsafe fn __msa_clei_u_h(a: v8u16) -> v8i16 { 
#[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clei_u.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clei_u_w(a: v4u32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); msa_clei_u_w(a, IMM5) @@ -2980,6 +3120,7 @@ pub unsafe fn __msa_clei_u_w(a: v4u32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clei_u.d, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clei_u_d(a: v2u64) -> v2i64 { static_assert_uimm_bits!(IMM5, 5); msa_clei_u_d(a, IMM5) @@ -2995,6 +3136,7 @@ pub unsafe fn __msa_clei_u_d(a: v2u64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clt_s.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clt_s_b(a: v16i8, b: v16i8) -> v16i8 { msa_clt_s_b(a, mem::transmute(b)) } @@ -3009,6 +3151,7 @@ pub unsafe fn __msa_clt_s_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clt_s.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clt_s_h(a: v8i16, b: v8i16) -> v8i16 { msa_clt_s_h(a, mem::transmute(b)) } @@ -3023,6 +3166,7 @@ pub unsafe fn __msa_clt_s_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clt_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clt_s_w(a: v4i32, b: v4i32) -> v4i32 { msa_clt_s_w(a, mem::transmute(b)) } @@ -3037,6 +3181,7 @@ pub unsafe fn __msa_clt_s_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clt_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clt_s_d(a: v2i64, b: v2i64) -> v2i64 { msa_clt_s_d(a, mem::transmute(b)) } @@ -3051,6 +3196,7 @@ pub unsafe fn __msa_clt_s_d(a: v2i64, b: v2i64) -> v2i64 { 
#[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clt_u.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clt_u_b(a: v16u8, b: v16u8) -> v16i8 { msa_clt_u_b(a, mem::transmute(b)) } @@ -3065,6 +3211,7 @@ pub unsafe fn __msa_clt_u_b(a: v16u8, b: v16u8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clt_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clt_u_h(a: v8u16, b: v8u16) -> v8i16 { msa_clt_u_h(a, mem::transmute(b)) } @@ -3079,6 +3226,7 @@ pub unsafe fn __msa_clt_u_h(a: v8u16, b: v8u16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clt_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clt_u_w(a: v4u32, b: v4u32) -> v4i32 { msa_clt_u_w(a, mem::transmute(b)) } @@ -3093,6 +3241,7 @@ pub unsafe fn __msa_clt_u_w(a: v4u32, b: v4u32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clt_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clt_u_d(a: v2u64, b: v2u64) -> v2i64 { msa_clt_u_d(a, mem::transmute(b)) } @@ -3108,6 +3257,7 @@ pub unsafe fn __msa_clt_u_d(a: v2u64, b: v2u64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clti_s.b, imm_s5 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clti_s_b(a: v16i8) -> v16i8 { static_assert_simm_bits!(IMM_S5, 5); msa_clti_s_b(a, IMM_S5) @@ -3124,6 +3274,7 @@ pub unsafe fn __msa_clti_s_b(a: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clti_s.h, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clti_s_h(a: v8i16) -> v8i16 { static_assert_simm_bits!(IMM_S5, 5); msa_clti_s_h(a, IMM_S5) @@ -3140,6 +3291,7 @@ pub unsafe fn __msa_clti_s_h(a: v8i16) -> 
v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clti_s.w, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clti_s_w(a: v4i32) -> v4i32 { static_assert_simm_bits!(IMM_S5, 5); msa_clti_s_w(a, IMM_S5) @@ -3156,6 +3308,7 @@ pub unsafe fn __msa_clti_s_w(a: v4i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clti_s.d, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clti_s_d(a: v2i64) -> v2i64 { static_assert_simm_bits!(IMM_S5, 5); msa_clti_s_d(a, IMM_S5) @@ -3172,6 +3325,7 @@ pub unsafe fn __msa_clti_s_d(a: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clti_u.b, imm5 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clti_u_b(a: v16u8) -> v16i8 { static_assert_uimm_bits!(IMM5, 5); msa_clti_u_b(a, IMM5) @@ -3188,6 +3342,7 @@ pub unsafe fn __msa_clti_u_b(a: v16u8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clti_u.h, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clti_u_h(a: v8u16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); msa_clti_u_h(a, IMM5) @@ -3204,6 +3359,7 @@ pub unsafe fn __msa_clti_u_h(a: v8u16) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clti_u.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_clti_u_w(a: v4u32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); msa_clti_u_w(a, IMM5) @@ -3220,6 +3376,7 @@ pub unsafe fn __msa_clti_u_w(a: v4u32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(clti_u.d, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = 
"111198")] pub unsafe fn __msa_clti_u_d(a: v2u64) -> v2i64 { static_assert_uimm_bits!(IMM5, 5); msa_clti_u_d(a, IMM5) @@ -3234,6 +3391,7 @@ pub unsafe fn __msa_clti_u_d(a: v2u64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(copy_s.b, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_copy_s_b(a: v16i8) -> i32 { static_assert_uimm_bits!(IMM4, 4); msa_copy_s_b(a, IMM4) @@ -3248,6 +3406,7 @@ pub unsafe fn __msa_copy_s_b(a: v16i8) -> i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(copy_s.h, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_copy_s_h(a: v8i16) -> i32 { static_assert_uimm_bits!(IMM3, 3); msa_copy_s_h(a, IMM3) @@ -3262,6 +3421,7 @@ pub unsafe fn __msa_copy_s_h(a: v8i16) -> i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(copy_s.w, imm2 = 0b11))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_copy_s_w(a: v4i32) -> i32 { static_assert_uimm_bits!(IMM2, 2); msa_copy_s_w(a, IMM2) @@ -3276,6 +3436,7 @@ pub unsafe fn __msa_copy_s_w(a: v4i32) -> i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(copy_s.d, imm1 = 0b1))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_copy_s_d(a: v2i64) -> i64 { static_assert_uimm_bits!(IMM1, 1); msa_copy_s_d(a, IMM1) @@ -3290,6 +3451,7 @@ pub unsafe fn __msa_copy_s_d(a: v2i64) -> i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(copy_u.b, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_copy_u_b(a: v16i8) -> u32 { static_assert_uimm_bits!(IMM4, 4); msa_copy_u_b(a, IMM4) @@ -3304,6 +3466,7 @@ pub unsafe fn __msa_copy_u_b(a: v16i8) -> u32 { #[target_feature(enable = 
"msa")] #[cfg_attr(test, assert_instr(copy_u.h, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_copy_u_h(a: v8i16) -> u32 { static_assert_uimm_bits!(IMM3, 3); msa_copy_u_h(a, IMM3) @@ -3318,6 +3481,7 @@ pub unsafe fn __msa_copy_u_h(a: v8i16) -> u32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(copy_u.w, imm2 = 0b11))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_copy_u_w(a: v4i32) -> u32 { static_assert_uimm_bits!(IMM2, 2); msa_copy_u_w(a, IMM2) @@ -3332,6 +3496,7 @@ pub unsafe fn __msa_copy_u_w(a: v4i32) -> u32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(copy_u.d, imm1 = 0b1))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_copy_u_d(a: v2i64) -> u64 { static_assert_uimm_bits!(IMM1, 1); msa_copy_u_d(a, IMM1) @@ -3348,6 +3513,7 @@ pub unsafe fn __msa_copy_u_d(a: v2i64) -> u64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ctcmsa, imm1 = 0b1))] #[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ctcmsa(a: i32) -> () { static_assert_uimm_bits!(IMM5, 5); msa_ctcmsa(IMM5, a) @@ -3362,6 +3528,7 @@ pub unsafe fn __msa_ctcmsa(a: i32) -> () { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(div_s.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_div_s_b(a: v16i8, b: v16i8) -> v16i8 { msa_div_s_b(a, mem::transmute(b)) } @@ -3375,6 +3542,7 @@ pub unsafe fn __msa_div_s_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(div_s.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_div_s_h(a: v8i16, b: v8i16) -> v8i16 { msa_div_s_h(a, mem::transmute(b)) } @@ -3388,6 +3556,7 @@ pub unsafe fn __msa_div_s_h(a: v8i16, b: 
v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(div_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_div_s_w(a: v4i32, b: v4i32) -> v4i32 { msa_div_s_w(a, mem::transmute(b)) } @@ -3401,6 +3570,7 @@ pub unsafe fn __msa_div_s_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(div_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_div_s_d(a: v2i64, b: v2i64) -> v2i64 { msa_div_s_d(a, mem::transmute(b)) } @@ -3414,6 +3584,7 @@ pub unsafe fn __msa_div_s_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(div_u.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_div_u_b(a: v16u8, b: v16u8) -> v16u8 { msa_div_u_b(a, mem::transmute(b)) } @@ -3427,6 +3598,7 @@ pub unsafe fn __msa_div_u_b(a: v16u8, b: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(div_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_div_u_h(a: v8u16, b: v8u16) -> v8u16 { msa_div_u_h(a, mem::transmute(b)) } @@ -3440,6 +3612,7 @@ pub unsafe fn __msa_div_u_h(a: v8u16, b: v8u16) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(div_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_div_u_w(a: v4u32, b: v4u32) -> v4u32 { msa_div_u_w(a, mem::transmute(b)) } @@ -3453,6 +3626,7 @@ pub unsafe fn __msa_div_u_w(a: v4u32, b: v4u32) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(div_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_div_u_d(a: v2u64, b: v2u64) -> v2u64 { msa_div_u_d(a, mem::transmute(b)) } @@ -3468,6 +3642,7 @@ pub unsafe fn __msa_div_u_d(a: v2u64, b: v2u64) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, 
assert_instr(dotp_s.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dotp_s_h(a: v16i8, b: v16i8) -> v8i16 { msa_dotp_s_h(a, mem::transmute(b)) } @@ -3483,6 +3658,7 @@ pub unsafe fn __msa_dotp_s_h(a: v16i8, b: v16i8) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dotp_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dotp_s_w(a: v8i16, b: v8i16) -> v4i32 { msa_dotp_s_w(a, mem::transmute(b)) } @@ -3498,6 +3674,7 @@ pub unsafe fn __msa_dotp_s_w(a: v8i16, b: v8i16) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dotp_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dotp_s_d(a: v4i32, b: v4i32) -> v2i64 { msa_dotp_s_d(a, mem::transmute(b)) } @@ -3513,6 +3690,7 @@ pub unsafe fn __msa_dotp_s_d(a: v4i32, b: v4i32) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dotp_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dotp_u_h(a: v16u8, b: v16u8) -> v8u16 { msa_dotp_u_h(a, mem::transmute(b)) } @@ -3528,6 +3706,7 @@ pub unsafe fn __msa_dotp_u_h(a: v16u8, b: v16u8) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dotp_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dotp_u_w(a: v8u16, b: v8u16) -> v4u32 { msa_dotp_u_w(a, mem::transmute(b)) } @@ -3543,6 +3722,7 @@ pub unsafe fn __msa_dotp_u_w(a: v8u16, b: v8u16) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dotp_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dotp_u_d(a: v4u32, b: v4u32) -> v2u64 { msa_dotp_u_d(a, mem::transmute(b)) } @@ -3557,6 +3737,7 @@ pub unsafe fn __msa_dotp_u_d(a: v4u32, b: v4u32) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpadd_s.h))] +#[unstable(feature = "stdarch_mips", 
issue = "111198")] pub unsafe fn __msa_dpadd_s_h(a: v8i16, b: v16i8, c: v16i8) -> v8i16 { msa_dpadd_s_h(a, mem::transmute(b), c) } @@ -3571,6 +3752,7 @@ pub unsafe fn __msa_dpadd_s_h(a: v8i16, b: v16i8, c: v16i8) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpadd_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dpadd_s_w(a: v4i32, b: v8i16, c: v8i16) -> v4i32 { msa_dpadd_s_w(a, mem::transmute(b), c) } @@ -3585,6 +3767,7 @@ pub unsafe fn __msa_dpadd_s_w(a: v4i32, b: v8i16, c: v8i16) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpadd_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dpadd_s_d(a: v2i64, b: v4i32, c: v4i32) -> v2i64 { msa_dpadd_s_d(a, mem::transmute(b), c) } @@ -3599,6 +3782,7 @@ pub unsafe fn __msa_dpadd_s_d(a: v2i64, b: v4i32, c: v4i32) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpadd_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dpadd_u_h(a: v8u16, b: v16u8, c: v16u8) -> v8u16 { msa_dpadd_u_h(a, mem::transmute(b), c) } @@ -3613,6 +3797,7 @@ pub unsafe fn __msa_dpadd_u_h(a: v8u16, b: v16u8, c: v16u8) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpadd_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dpadd_u_w(a: v4u32, b: v8u16, c: v8u16) -> v4u32 { msa_dpadd_u_w(a, mem::transmute(b), c) } @@ -3627,6 +3812,7 @@ pub unsafe fn __msa_dpadd_u_w(a: v4u32, b: v8u16, c: v8u16) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpadd_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dpadd_u_d(a: v2u64, b: v4u32, c: v4u32) -> v2u64 { msa_dpadd_u_d(a, mem::transmute(b), c) } @@ -3642,6 +3828,7 @@ pub unsafe fn __msa_dpadd_u_d(a: v2u64, b: v4u32, c: v4u32) -> v2u64 { #[inline] 
#[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpsub_s.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dpsub_s_h(a: v8i16, b: v16i8, c: v16i8) -> v8i16 { msa_dpsub_s_h(a, mem::transmute(b), c) } @@ -3657,6 +3844,7 @@ pub unsafe fn __msa_dpsub_s_h(a: v8i16, b: v16i8, c: v16i8) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpsub_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dpsub_s_w(a: v4i32, b: v8i16, c: v8i16) -> v4i32 { msa_dpsub_s_w(a, mem::transmute(b), c) } @@ -3672,6 +3860,7 @@ pub unsafe fn __msa_dpsub_s_w(a: v4i32, b: v8i16, c: v8i16) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpsub_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dpsub_s_d(a: v2i64, b: v4i32, c: v4i32) -> v2i64 { msa_dpsub_s_d(a, mem::transmute(b), c) } @@ -3687,6 +3876,7 @@ pub unsafe fn __msa_dpsub_s_d(a: v2i64, b: v4i32, c: v4i32) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpsub_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dpsub_u_h(a: v8i16, b: v16u8, c: v16u8) -> v8i16 { msa_dpsub_u_h(a, mem::transmute(b), c) } @@ -3702,6 +3892,7 @@ pub unsafe fn __msa_dpsub_u_h(a: v8i16, b: v16u8, c: v16u8) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpsub_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dpsub_u_w(a: v4i32, b: v8u16, c: v8u16) -> v4i32 { msa_dpsub_u_w(a, mem::transmute(b), c) } @@ -3717,6 +3908,7 @@ pub unsafe fn __msa_dpsub_u_w(a: v4i32, b: v8u16, c: v8u16) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(dpsub_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_dpsub_u_d(a: v2i64, b: v4u32, c: v4u32) -> v2i64 { msa_dpsub_u_d(a, mem::transmute(b), c) } @@ 
-3730,6 +3922,7 @@ pub unsafe fn __msa_dpsub_u_d(a: v2i64, b: v4u32, c: v4u32) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fadd.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fadd_w(a: v4f32, b: v4f32) -> v4f32 { msa_fadd_w(a, mem::transmute(b)) } @@ -3743,6 +3936,7 @@ pub unsafe fn __msa_fadd_w(a: v4f32, b: v4f32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fadd.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fadd_d(a: v2f64, b: v2f64) -> v2f64 { msa_fadd_d(a, mem::transmute(b)) } @@ -3756,6 +3950,7 @@ pub unsafe fn __msa_fadd_d(a: v2f64, b: v2f64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcaf.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcaf_w(a: v4f32, b: v4f32) -> v4i32 { msa_fcaf_w(a, mem::transmute(b)) } @@ -3769,6 +3964,7 @@ pub unsafe fn __msa_fcaf_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcaf.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcaf_d(a: v2f64, b: v2f64) -> v2i64 { msa_fcaf_d(a, mem::transmute(b)) } @@ -3783,6 +3979,7 @@ pub unsafe fn __msa_fcaf_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fceq.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fceq_w(a: v4f32, b: v4f32) -> v4i32 { msa_fceq_w(a, mem::transmute(b)) } @@ -3797,6 +3994,7 @@ pub unsafe fn __msa_fceq_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fceq.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fceq_d(a: v2f64, b: v2f64) -> v2i64 { msa_fceq_d(a, mem::transmute(b)) } @@ -3813,6 +4011,7 @@ pub unsafe fn __msa_fceq_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] 
#[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fclass.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fclass_w(a: v4f32) -> v4i32 { msa_fclass_w(a) } @@ -3829,6 +4028,7 @@ pub unsafe fn __msa_fclass_w(a: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fclass.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fclass_d(a: v2f64) -> v2i64 { msa_fclass_d(a) } @@ -3843,6 +4043,7 @@ pub unsafe fn __msa_fclass_d(a: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcle.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcle_w(a: v4f32, b: v4f32) -> v4i32 { msa_fcle_w(a, mem::transmute(b)) } @@ -3857,6 +4058,7 @@ pub unsafe fn __msa_fcle_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcle.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcle_d(a: v2f64, b: v2f64) -> v2i64 { msa_fcle_d(a, mem::transmute(b)) } @@ -3871,6 +4073,7 @@ pub unsafe fn __msa_fcle_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fclt.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fclt_w(a: v4f32, b: v4f32) -> v4i32 { msa_fclt_w(a, mem::transmute(b)) } @@ -3885,6 +4088,7 @@ pub unsafe fn __msa_fclt_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fclt.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fclt_d(a: v2f64, b: v2f64) -> v2i64 { msa_fclt_d(a, mem::transmute(b)) } @@ -3899,6 +4103,7 @@ pub unsafe fn __msa_fclt_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcne.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcne_w(a: v4f32, b: v4f32) 
-> v4i32 { msa_fcne_w(a, mem::transmute(b)) } @@ -3913,6 +4118,7 @@ pub unsafe fn __msa_fcne_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcne.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcne_d(a: v2f64, b: v2f64) -> v2i64 { msa_fcne_d(a, mem::transmute(b)) } @@ -3927,6 +4133,7 @@ pub unsafe fn __msa_fcne_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcor.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcor_w(a: v4f32, b: v4f32) -> v4i32 { msa_fcor_w(a, mem::transmute(b)) } @@ -3941,6 +4148,7 @@ pub unsafe fn __msa_fcor_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcor.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcor_d(a: v2f64, b: v2f64) -> v2i64 { msa_fcor_d(a, mem::transmute(b)) } @@ -3955,6 +4163,7 @@ pub unsafe fn __msa_fcor_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcueq.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcueq_w(a: v4f32, b: v4f32) -> v4i32 { msa_fcueq_w(a, mem::transmute(b)) } @@ -3969,6 +4178,7 @@ pub unsafe fn __msa_fcueq_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcueq.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcueq_d(a: v2f64, b: v2f64) -> v2i64 { msa_fcueq_d(a, mem::transmute(b)) } @@ -3983,6 +4193,7 @@ pub unsafe fn __msa_fcueq_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcule.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcule_w(a: v4f32, b: v4f32) -> v4i32 { msa_fcule_w(a, mem::transmute(b)) } @@ -3997,6 +4208,7 @@ pub unsafe fn __msa_fcule_w(a: 
v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcule.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcule_d(a: v2f64, b: v2f64) -> v2i64 { msa_fcule_d(a, mem::transmute(b)) } @@ -4011,6 +4223,7 @@ pub unsafe fn __msa_fcule_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcult.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcult_w(a: v4f32, b: v4f32) -> v4i32 { msa_fcult_w(a, mem::transmute(b)) } @@ -4025,6 +4238,7 @@ pub unsafe fn __msa_fcult_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcult.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcult_d(a: v2f64, b: v2f64) -> v2i64 { msa_fcult_d(a, mem::transmute(b)) } @@ -4039,6 +4253,7 @@ pub unsafe fn __msa_fcult_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcun.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcun_w(a: v4f32, b: v4f32) -> v4i32 { msa_fcun_w(a, mem::transmute(b)) } @@ -4053,6 +4268,7 @@ pub unsafe fn __msa_fcun_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcun.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcun_d(a: v2f64, b: v2f64) -> v2i64 { msa_fcun_d(a, mem::transmute(b)) } @@ -4067,6 +4283,7 @@ pub unsafe fn __msa_fcun_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fcune.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcune_w(a: v4f32, b: v4f32) -> v4i32 { msa_fcune_w(a, mem::transmute(b)) } @@ -4081,6 +4298,7 @@ pub unsafe fn __msa_fcune_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, 
assert_instr(fcune.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fcune_d(a: v2f64, b: v2f64) -> v2i64 { msa_fcune_d(a, mem::transmute(b)) } @@ -4094,6 +4312,7 @@ pub unsafe fn __msa_fcune_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fdiv.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fdiv_w(a: v4f32, b: v4f32) -> v4f32 { msa_fdiv_w(a, mem::transmute(b)) } @@ -4107,6 +4326,7 @@ pub unsafe fn __msa_fdiv_w(a: v4f32, b: v4f32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fdiv.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fdiv_d(a: v2f64, b: v2f64) -> v2f64 { msa_fdiv_d(a, mem::transmute(b)) } @@ -4122,7 +4342,7 @@ pub unsafe fn __msa_fdiv_d(a: v2f64, b: v2f64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fexdo.h))] -pub unsafe fn __msa_fexdo_h(a: v4f32, b: v4f32) -> f16x8 { + #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fexdo_h(a: v4f32, b: v4f32) -> f16x8 { msa_fexdo_h(a, mem::transmute(b)) }*/ @@ -4136,6 +4356,7 @@ pub unsafe fn __msa_fexdo_h(a: v4f32, b: v4f32) -> f16x8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fexdo.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fexdo_w(a: v2f64, b: v2f64) -> v4f32 { msa_fexdo_w(a, mem::transmute(b)) } @@ -4150,6 +4371,7 @@ pub unsafe fn __msa_fexdo_w(a: v2f64, b: v2f64) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fexp2.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fexp2_w(a: v4f32, b: v4i32) -> v4f32 { msa_fexp2_w(a, mem::transmute(b)) } @@ -4164,6 +4386,7 @@ pub unsafe fn __msa_fexp2_w(a: v4f32, b: v4i32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fexp2.d))] 
+#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fexp2_d(a: v2f64, b: v2i64) -> v2f64 { msa_fexp2_d(a, mem::transmute(b)) } @@ -4179,7 +4402,7 @@ pub unsafe fn __msa_fexp2_d(a: v2f64, b: v2i64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fexupl.w))] -pub unsafe fn __msa_fexupl_w(a: f16x8) -> v4f32 { + #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fexupl_w(a: f16x8) -> v4f32 { msa_fexupl_w(a) }*/ @@ -4193,6 +4416,7 @@ pub unsafe fn __msa_fexupl_w(a: f16x8) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fexupl.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fexupl_d(a: v4f32) -> v2f64 { msa_fexupl_d(a) } @@ -4208,7 +4432,7 @@ pub unsafe fn __msa_fexupl_d(a: v4f32) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fexupr.w))] -pub unsafe fn __msa_fexupr_w(a: f16x8) -> v4f32 { + #[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fexupr_w(a: f16x8) -> v4f32 { msa_fexupr_w(a) } */ @@ -4222,6 +4446,7 @@ pub unsafe fn __msa_fexupr_w(a: f16x8) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fexupr.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fexupr_d(a: v4f32) -> v2f64 { msa_fexupr_d(a) } @@ -4235,6 +4460,7 @@ pub unsafe fn __msa_fexupr_d(a: v4f32) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ffint_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ffint_s_w(a: v4i32) -> v4f32 { msa_ffint_s_w(a) } @@ -4248,6 +4474,7 @@ pub unsafe fn __msa_ffint_s_w(a: v4i32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ffint_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ffint_s_d(a: v2i64) -> v2f64 { msa_ffint_s_d(a) } @@ -4261,6 +4488,7 @@ 
pub unsafe fn __msa_ffint_s_d(a: v2i64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ffint_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ffint_u_w(a: v4u32) -> v4f32 { msa_ffint_u_w(a) } @@ -4274,6 +4502,7 @@ pub unsafe fn __msa_ffint_u_w(a: v4u32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ffint_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ffint_u_d(a: v2u64) -> v2f64 { msa_ffint_u_d(a) } @@ -4288,6 +4517,7 @@ pub unsafe fn __msa_ffint_u_d(a: v2u64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ffql.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ffql_w(a: v8i16) -> v4f32 { msa_ffql_w(a) } @@ -4302,6 +4532,7 @@ pub unsafe fn __msa_ffql_w(a: v8i16) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ffql.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ffql_d(a: v4i32) -> v2f64 { msa_ffql_d(a) } @@ -4316,6 +4547,7 @@ pub unsafe fn __msa_ffql_d(a: v4i32) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ffqr.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ffqr_w(a: v8i16) -> v4f32 { msa_ffqr_w(a) } @@ -4330,6 +4562,7 @@ pub unsafe fn __msa_ffqr_w(a: v8i16) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ffqr.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ffqr_d(a: v4i32) -> v2f64 { msa_ffqr_d(a) } @@ -4343,6 +4576,7 @@ pub unsafe fn __msa_ffqr_d(a: v4i32) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fill.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fill_b(a: i32) -> v16i8 { msa_fill_b(a) } @@ -4356,6 +4590,7 @@ pub unsafe fn __msa_fill_b(a: i32) -> v16i8 { 
#[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fill.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fill_h(a: i32) -> v8i16 { msa_fill_h(a) } @@ -4369,6 +4604,7 @@ pub unsafe fn __msa_fill_h(a: i32) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fill.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fill_w(a: i32) -> v4i32 { msa_fill_w(a) } @@ -4382,6 +4618,7 @@ pub unsafe fn __msa_fill_w(a: i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fill.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fill_d(a: i64) -> v2i64 { msa_fill_d(a) } @@ -4395,6 +4632,7 @@ pub unsafe fn __msa_fill_d(a: i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(flog2.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_flog2_w(a: v4f32) -> v4f32 { msa_flog2_w(a) } @@ -4408,6 +4646,7 @@ pub unsafe fn __msa_flog2_w(a: v4f32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(flog2.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_flog2_d(a: v2f64) -> v2f64 { msa_flog2_d(a) } @@ -4421,6 +4660,7 @@ pub unsafe fn __msa_flog2_d(a: v2f64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmadd.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fmadd_w(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { msa_fmadd_w(a, mem::transmute(b), c) } @@ -4434,6 +4674,7 @@ pub unsafe fn __msa_fmadd_w(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmadd.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { msa_fmadd_d(a, mem::transmute(b), c) } @@ -4447,6 +4688,7 @@ pub unsafe fn 
__msa_fmadd_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmax.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fmax_w(a: v4f32, b: v4f32) -> v4f32 { msa_fmax_w(a, mem::transmute(b)) } @@ -4460,6 +4702,7 @@ pub unsafe fn __msa_fmax_w(a: v4f32, b: v4f32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmax.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fmax_d(a: v2f64, b: v2f64) -> v2f64 { msa_fmax_d(a, mem::transmute(b)) } @@ -4474,6 +4717,7 @@ pub unsafe fn __msa_fmax_d(a: v2f64, b: v2f64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmax_a.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fmax_a_w(a: v4f32, b: v4f32) -> v4f32 { msa_fmax_a_w(a, mem::transmute(b)) } @@ -4488,6 +4732,7 @@ pub unsafe fn __msa_fmax_a_w(a: v4f32, b: v4f32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmax_a.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fmax_a_d(a: v2f64, b: v2f64) -> v2f64 { msa_fmax_a_d(a, mem::transmute(b)) } @@ -4501,6 +4746,7 @@ pub unsafe fn __msa_fmax_a_d(a: v2f64, b: v2f64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmin.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fmin_w(a: v4f32, b: v4f32) -> v4f32 { msa_fmin_w(a, mem::transmute(b)) } @@ -4514,6 +4760,7 @@ pub unsafe fn __msa_fmin_w(a: v4f32, b: v4f32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmin.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fmin_d(a: v2f64, b: v2f64) -> v2f64 { msa_fmin_d(a, mem::transmute(b)) } @@ -4528,6 +4775,7 @@ pub unsafe fn __msa_fmin_d(a: v2f64, b: v2f64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] 
#[cfg_attr(test, assert_instr(fmin_a.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fmin_a_w(a: v4f32, b: v4f32) -> v4f32 { msa_fmin_a_w(a, mem::transmute(b)) } @@ -4542,6 +4790,7 @@ pub unsafe fn __msa_fmin_a_w(a: v4f32, b: v4f32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmin_a.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fmin_a_d(a: v2f64, b: v2f64) -> v2f64 { msa_fmin_a_d(a, mem::transmute(b)) } @@ -4555,6 +4804,7 @@ pub unsafe fn __msa_fmin_a_d(a: v2f64, b: v2f64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmsub.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fmsub_w(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { msa_fmsub_w(a, mem::transmute(b), c) } @@ -4568,6 +4818,7 @@ pub unsafe fn __msa_fmsub_w(a: v4f32, b: v4f32, c: v4f32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmsub.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { msa_fmsub_d(a, mem::transmute(b), c) } @@ -4580,6 +4831,7 @@ pub unsafe fn __msa_fmsub_d(a: v2f64, b: v2f64, c: v2f64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmul.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fmul_w(a: v4f32, b: v4f32) -> v4f32 { msa_fmul_w(a, mem::transmute(b)) } @@ -4592,6 +4844,7 @@ pub unsafe fn __msa_fmul_w(a: v4f32, b: v4f32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fmul.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fmul_d(a: v2f64, b: v2f64) -> v2f64 { msa_fmul_d(a, mem::transmute(b)) } @@ -4605,6 +4858,7 @@ pub unsafe fn __msa_fmul_d(a: v2f64, b: v2f64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(frint.w))] 
+#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_frint_w(a: v4f32) -> v4f32 { msa_frint_w(a) } @@ -4618,6 +4872,7 @@ pub unsafe fn __msa_frint_w(a: v4f32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(frint.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_frint_d(a: v2f64) -> v2f64 { msa_frint_d(a) } @@ -4630,6 +4885,7 @@ pub unsafe fn __msa_frint_d(a: v2f64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(frcp.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_frcp_w(a: v4f32) -> v4f32 { msa_frcp_w(a) } @@ -4642,6 +4898,7 @@ pub unsafe fn __msa_frcp_w(a: v4f32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(frcp.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_frcp_d(a: v2f64) -> v2f64 { msa_frcp_d(a) } @@ -4654,6 +4911,7 @@ pub unsafe fn __msa_frcp_d(a: v2f64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(frsqrt.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_frsqrt_w(a: v4f32) -> v4f32 { msa_frsqrt_w(a) } @@ -4666,6 +4924,7 @@ pub unsafe fn __msa_frsqrt_w(a: v4f32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(frsqrt.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_frsqrt_d(a: v2f64) -> v2f64 { msa_frsqrt_d(a) } @@ -4680,6 +4939,7 @@ pub unsafe fn __msa_frsqrt_d(a: v2f64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsaf.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsaf_w(a: v4f32, b: v4f32) -> v4i32 { msa_fsaf_w(a, mem::transmute(b)) } @@ -4694,6 +4954,7 @@ pub unsafe fn __msa_fsaf_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsaf.d))] 
+#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsaf_d(a: v2f64, b: v2f64) -> v2i64 { msa_fsaf_d(a, mem::transmute(b)) } @@ -4707,6 +4968,7 @@ pub unsafe fn __msa_fsaf_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fseq.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fseq_w(a: v4f32, b: v4f32) -> v4i32 { msa_fseq_w(a, mem::transmute(b)) } @@ -4720,6 +4982,7 @@ pub unsafe fn __msa_fseq_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fseq.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fseq_d(a: v2f64, b: v2f64) -> v2i64 { msa_fseq_d(a, mem::transmute(b)) } @@ -4733,6 +4996,7 @@ pub unsafe fn __msa_fseq_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsle.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsle_w(a: v4f32, b: v4f32) -> v4i32 { msa_fsle_w(a, mem::transmute(b)) } @@ -4746,6 +5010,7 @@ pub unsafe fn __msa_fsle_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsle.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsle_d(a: v2f64, b: v2f64) -> v2i64 { msa_fsle_d(a, mem::transmute(b)) } @@ -4759,6 +5024,7 @@ pub unsafe fn __msa_fsle_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fslt.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fslt_w(a: v4f32, b: v4f32) -> v4i32 { msa_fslt_w(a, mem::transmute(b)) } @@ -4772,6 +5038,7 @@ pub unsafe fn __msa_fslt_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fslt.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fslt_d(a: v2f64, b: v2f64) -> v2i64 { 
msa_fslt_d(a, mem::transmute(b)) } @@ -4785,6 +5052,7 @@ pub unsafe fn __msa_fslt_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsne.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsne_w(a: v4f32, b: v4f32) -> v4i32 { msa_fsne_w(a, mem::transmute(b)) } @@ -4798,6 +5066,7 @@ pub unsafe fn __msa_fsne_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsne.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsne_d(a: v2f64, b: v2f64) -> v2i64 { msa_fsne_d(a, mem::transmute(b)) } @@ -4812,6 +5081,7 @@ pub unsafe fn __msa_fsne_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsor.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsor_w(a: v4f32, b: v4f32) -> v4i32 { msa_fsor_w(a, mem::transmute(b)) } @@ -4826,6 +5096,7 @@ pub unsafe fn __msa_fsor_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsor.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsor_d(a: v2f64, b: v2f64) -> v2i64 { msa_fsor_d(a, mem::transmute(b)) } @@ -4839,6 +5110,7 @@ pub unsafe fn __msa_fsor_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsqrt.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsqrt_w(a: v4f32) -> v4f32 { msa_fsqrt_w(a) } @@ -4852,6 +5124,7 @@ pub unsafe fn __msa_fsqrt_w(a: v4f32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsqrt.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsqrt_d(a: v2f64) -> v2f64 { msa_fsqrt_d(a) } @@ -4866,6 +5139,7 @@ pub unsafe fn __msa_fsqrt_d(a: v2f64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, 
assert_instr(fsub.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsub_w(a: v4f32, b: v4f32) -> v4f32 { msa_fsub_w(a, mem::transmute(b)) } @@ -4880,6 +5154,7 @@ pub unsafe fn __msa_fsub_w(a: v4f32, b: v4f32) -> v4f32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsub.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsub_d(a: v2f64, b: v2f64) -> v2f64 { msa_fsub_d(a, mem::transmute(b)) } @@ -4894,6 +5169,7 @@ pub unsafe fn __msa_fsub_d(a: v2f64, b: v2f64) -> v2f64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsueq.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsueq_w(a: v4f32, b: v4f32) -> v4i32 { msa_fsueq_w(a, mem::transmute(b)) } @@ -4908,6 +5184,7 @@ pub unsafe fn __msa_fsueq_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsueq.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsueq_d(a: v2f64, b: v2f64) -> v2i64 { msa_fsueq_d(a, mem::transmute(b)) } @@ -4922,6 +5199,7 @@ pub unsafe fn __msa_fsueq_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsule.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsule_w(a: v4f32, b: v4f32) -> v4i32 { msa_fsule_w(a, mem::transmute(b)) } @@ -4936,6 +5214,7 @@ pub unsafe fn __msa_fsule_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsule.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsule_d(a: v2f64, b: v2f64) -> v2i64 { msa_fsule_d(a, mem::transmute(b)) } @@ -4950,6 +5229,7 @@ pub unsafe fn __msa_fsule_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsult.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn 
__msa_fsult_w(a: v4f32, b: v4f32) -> v4i32 { msa_fsult_w(a, mem::transmute(b)) } @@ -4964,6 +5244,7 @@ pub unsafe fn __msa_fsult_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsult.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsult_d(a: v2f64, b: v2f64) -> v2i64 { msa_fsult_d(a, mem::transmute(b)) } @@ -4978,6 +5259,7 @@ pub unsafe fn __msa_fsult_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsun.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsun_w(a: v4f32, b: v4f32) -> v4i32 { msa_fsun_w(a, mem::transmute(b)) } @@ -4992,6 +5274,7 @@ pub unsafe fn __msa_fsun_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsun.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsun_d(a: v2f64, b: v2f64) -> v2i64 { msa_fsun_d(a, mem::transmute(b)) } @@ -5006,6 +5289,7 @@ pub unsafe fn __msa_fsun_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsune.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsune_w(a: v4f32, b: v4f32) -> v4i32 { msa_fsune_w(a, mem::transmute(b)) } @@ -5020,6 +5304,7 @@ pub unsafe fn __msa_fsune_w(a: v4f32, b: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(fsune.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_fsune_d(a: v2f64, b: v2f64) -> v2i64 { msa_fsune_d(a, mem::transmute(b)) } @@ -5034,6 +5319,7 @@ pub unsafe fn __msa_fsune_d(a: v2f64, b: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftint_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ftint_s_w(a: v4f32) -> v4i32 { msa_ftint_s_w(a) } @@ -5048,6 +5334,7 @@ pub unsafe fn 
__msa_ftint_s_w(a: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftint_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ftint_s_d(a: v2f64) -> v2i64 { msa_ftint_s_d(a) } @@ -5062,6 +5349,7 @@ pub unsafe fn __msa_ftint_s_d(a: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftint_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ftint_u_w(a: v4f32) -> v4u32 { msa_ftint_u_w(a) } @@ -5076,6 +5364,7 @@ pub unsafe fn __msa_ftint_u_w(a: v4f32) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftint_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ftint_u_d(a: v2f64) -> v2u64 { msa_ftint_u_d(a) } @@ -5091,6 +5380,7 @@ pub unsafe fn __msa_ftint_u_d(a: v2f64) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftq.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ftq_h(a: v4f32, b: v4f32) -> v8i16 { msa_ftq_h(a, mem::transmute(b)) } @@ -5106,6 +5396,7 @@ pub unsafe fn __msa_ftq_h(a: v4f32, b: v4f32) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftq.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ftq_w(a: v2f64, b: v2f64) -> v4i32 { msa_ftq_w(a, mem::transmute(b)) } @@ -5119,6 +5410,7 @@ pub unsafe fn __msa_ftq_w(a: v2f64, b: v2f64) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftrunc_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ftrunc_s_w(a: v4f32) -> v4i32 { msa_ftrunc_s_w(a) } @@ -5132,6 +5424,7 @@ pub unsafe fn __msa_ftrunc_s_w(a: v4f32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftrunc_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ftrunc_s_d(a: v2f64) -> 
v2i64 { msa_ftrunc_s_d(a) } @@ -5145,6 +5438,7 @@ pub unsafe fn __msa_ftrunc_s_d(a: v2f64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftrunc_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ftrunc_u_w(a: v4f32) -> v4u32 { msa_ftrunc_u_w(a) } @@ -5158,6 +5452,7 @@ pub unsafe fn __msa_ftrunc_u_w(a: v4f32) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ftrunc_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ftrunc_u_d(a: v2f64) -> v2u64 { msa_ftrunc_u_d(a) } @@ -5172,6 +5467,7 @@ pub unsafe fn __msa_ftrunc_u_d(a: v2f64) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hadd_s.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_hadd_s_h(a: v16i8, b: v16i8) -> v8i16 { msa_hadd_s_h(a, mem::transmute(b)) } @@ -5186,6 +5482,7 @@ pub unsafe fn __msa_hadd_s_h(a: v16i8, b: v16i8) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hadd_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_hadd_s_w(a: v8i16, b: v8i16) -> v4i32 { msa_hadd_s_w(a, mem::transmute(b)) } @@ -5200,6 +5497,7 @@ pub unsafe fn __msa_hadd_s_w(a: v8i16, b: v8i16) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hadd_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_hadd_s_d(a: v4i32, b: v4i32) -> v2i64 { msa_hadd_s_d(a, mem::transmute(b)) } @@ -5214,6 +5512,7 @@ pub unsafe fn __msa_hadd_s_d(a: v4i32, b: v4i32) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hadd_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_hadd_u_h(a: v16u8, b: v16u8) -> v8u16 { msa_hadd_u_h(a, mem::transmute(b)) } @@ -5228,6 +5527,7 @@ pub unsafe fn __msa_hadd_u_h(a: v16u8, b: v16u8) -> v8u16 { #[inline] 
#[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hadd_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_hadd_u_w(a: v8u16, b: v8u16) -> v4u32 { msa_hadd_u_w(a, mem::transmute(b)) } @@ -5242,6 +5542,7 @@ pub unsafe fn __msa_hadd_u_w(a: v8u16, b: v8u16) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hadd_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_hadd_u_d(a: v4u32, b: v4u32) -> v2u64 { msa_hadd_u_d(a, mem::transmute(b)) } @@ -5256,6 +5557,7 @@ pub unsafe fn __msa_hadd_u_d(a: v4u32, b: v4u32) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hsub_s.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_hsub_s_h(a: v16i8, b: v16i8) -> v8i16 { msa_hsub_s_h(a, mem::transmute(b)) } @@ -5270,6 +5572,7 @@ pub unsafe fn __msa_hsub_s_h(a: v16i8, b: v16i8) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hsub_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_hsub_s_w(a: v8i16, b: v8i16) -> v4i32 { msa_hsub_s_w(a, mem::transmute(b)) } @@ -5284,6 +5587,7 @@ pub unsafe fn __msa_hsub_s_w(a: v8i16, b: v8i16) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hsub_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_hsub_s_d(a: v4i32, b: v4i32) -> v2i64 { msa_hsub_s_d(a, mem::transmute(b)) } @@ -5298,6 +5602,7 @@ pub unsafe fn __msa_hsub_s_d(a: v4i32, b: v4i32) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hsub_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_hsub_u_h(a: v16u8, b: v16u8) -> v8i16 { msa_hsub_u_h(a, mem::transmute(b)) } @@ -5312,6 +5617,7 @@ pub unsafe fn __msa_hsub_u_h(a: v16u8, b: v16u8) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, 
assert_instr(hsub_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_hsub_u_w(a: v8u16, b: v8u16) -> v4i32 { msa_hsub_u_w(a, mem::transmute(b)) } @@ -5326,6 +5632,7 @@ pub unsafe fn __msa_hsub_u_w(a: v8u16, b: v8u16) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(hsub_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_hsub_u_d(a: v4u32, b: v4u32) -> v2i64 { msa_hsub_u_d(a, mem::transmute(b)) } @@ -5340,6 +5647,7 @@ pub unsafe fn __msa_hsub_u_d(a: v4u32, b: v4u32) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvev.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ilvev_b(a: v16i8, b: v16i8) -> v16i8 { msa_ilvev_b(a, mem::transmute(b)) } @@ -5354,6 +5662,7 @@ pub unsafe fn __msa_ilvev_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvev.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ilvev_h(a: v8i16, b: v8i16) -> v8i16 { msa_ilvev_h(a, mem::transmute(b)) } @@ -5368,6 +5677,7 @@ pub unsafe fn __msa_ilvev_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvev.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ilvev_w(a: v4i32, b: v4i32) -> v4i32 { msa_ilvev_w(a, mem::transmute(b)) } @@ -5382,6 +5692,7 @@ pub unsafe fn __msa_ilvev_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvev.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ilvev_d(a: v2i64, b: v2i64) -> v2i64 { msa_ilvev_d(a, mem::transmute(b)) } @@ -5396,6 +5707,7 @@ pub unsafe fn __msa_ilvev_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvl.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub 
unsafe fn __msa_ilvl_b(a: v16i8, b: v16i8) -> v16i8 { msa_ilvl_b(a, mem::transmute(b)) } @@ -5410,6 +5722,7 @@ pub unsafe fn __msa_ilvl_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvl.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ilvl_h(a: v8i16, b: v8i16) -> v8i16 { msa_ilvl_h(a, mem::transmute(b)) } @@ -5424,6 +5737,7 @@ pub unsafe fn __msa_ilvl_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvl.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ilvl_w(a: v4i32, b: v4i32) -> v4i32 { msa_ilvl_w(a, mem::transmute(b)) } @@ -5438,6 +5752,7 @@ pub unsafe fn __msa_ilvl_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvl.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ilvl_d(a: v2i64, b: v2i64) -> v2i64 { msa_ilvl_d(a, mem::transmute(b)) } @@ -5452,6 +5767,7 @@ pub unsafe fn __msa_ilvl_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvod.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ilvod_b(a: v16i8, b: v16i8) -> v16i8 { msa_ilvod_b(a, mem::transmute(b)) } @@ -5466,6 +5782,7 @@ pub unsafe fn __msa_ilvod_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvod.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ilvod_h(a: v8i16, b: v8i16) -> v8i16 { msa_ilvod_h(a, mem::transmute(b)) } @@ -5480,6 +5797,7 @@ pub unsafe fn __msa_ilvod_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvod.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ilvod_w(a: v4i32, b: v4i32) -> v4i32 { msa_ilvod_w(a, mem::transmute(b)) } @@ -5494,6 
+5812,7 @@ pub unsafe fn __msa_ilvod_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvod.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ilvod_d(a: v2i64, b: v2i64) -> v2i64 { msa_ilvod_d(a, mem::transmute(b)) } @@ -5508,6 +5827,7 @@ pub unsafe fn __msa_ilvod_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvr.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ilvr_b(a: v16i8, b: v16i8) -> v16i8 { msa_ilvr_b(a, mem::transmute(b)) } @@ -5522,6 +5842,7 @@ pub unsafe fn __msa_ilvr_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvr.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ilvr_h(a: v8i16, b: v8i16) -> v8i16 { msa_ilvr_h(a, mem::transmute(b)) } @@ -5536,6 +5857,7 @@ pub unsafe fn __msa_ilvr_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvr.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ilvr_w(a: v4i32, b: v4i32) -> v4i32 { msa_ilvr_w(a, mem::transmute(b)) } @@ -5550,6 +5872,7 @@ pub unsafe fn __msa_ilvr_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ilvr.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ilvr_d(a: v2i64, b: v2i64) -> v2i64 { msa_ilvr_d(a, mem::transmute(b)) } @@ -5564,6 +5887,7 @@ pub unsafe fn __msa_ilvr_d(a: v2i64, b: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(insert.b, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_insert_b(a: v16i8, c: i32) -> v16i8 { static_assert_uimm_bits!(IMM4, 4); msa_insert_b(a, IMM4, c) @@ -5579,6 +5903,7 @@ pub unsafe fn __msa_insert_b(a: 
v16i8, c: i32) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(insert.h, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_insert_h(a: v8i16, c: i32) -> v8i16 { static_assert_uimm_bits!(IMM3, 3); msa_insert_h(a, IMM3, c) @@ -5594,6 +5919,7 @@ pub unsafe fn __msa_insert_h(a: v8i16, c: i32) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(insert.w, imm2 = 0b11))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_insert_w(a: v4i32, c: i32) -> v4i32 { static_assert_uimm_bits!(IMM2, 2); msa_insert_w(a, IMM2, c) @@ -5609,6 +5935,7 @@ pub unsafe fn __msa_insert_w(a: v4i32, c: i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(insert.d, imm1 = 0b1))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_insert_d(a: v2i64, c: i64) -> v2i64 { static_assert_uimm_bits!(IMM1, 1); msa_insert_d(a, IMM1, c) @@ -5624,6 +5951,7 @@ pub unsafe fn __msa_insert_d(a: v2i64, c: i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(insve.b, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_insve_b(a: v16i8, c: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM4, 4); msa_insve_b(a, IMM4, c) @@ -5639,6 +5967,7 @@ pub unsafe fn __msa_insve_b(a: v16i8, c: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(insve.h, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_insve_h(a: v8i16, c: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM3, 3); msa_insve_h(a, IMM3, c) @@ -5654,6 +5983,7 @@ pub unsafe fn __msa_insve_h(a: v8i16, c: v8i16) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(insve.w, imm2 = 
0b11))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_insve_w(a: v4i32, c: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM2, 2); msa_insve_w(a, IMM2, c) @@ -5669,6 +5999,7 @@ pub unsafe fn __msa_insve_w(a: v4i32, c: v4i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(insve.d, imm1 = 0b1))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_insve_d(a: v2i64, c: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM1, 1); msa_insve_d(a, IMM1, c) @@ -5684,6 +6015,7 @@ pub unsafe fn __msa_insve_d(a: v2i64, c: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ld.b, imm_s10 = 0b1111111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ld_b(mem_addr: *mut u8) -> v16i8 { static_assert_simm_bits!(IMM_S10, 10); msa_ld_b(mem_addr, IMM_S10) @@ -5699,6 +6031,7 @@ pub unsafe fn __msa_ld_b(mem_addr: *mut u8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ld.h, imm_s11 = 0b11111111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ld_h(mem_addr: *mut u8) -> v8i16 { static_assert_simm_bits!(IMM_S11, 11); static_assert!(IMM_S11 % 2 == 0); @@ -5715,6 +6048,7 @@ pub unsafe fn __msa_ld_h(mem_addr: *mut u8) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ld.w, imm_s12 = 0b111111111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ld_w(mem_addr: *mut u8) -> v4i32 { static_assert_simm_bits!(IMM_S12, 12); static_assert!(IMM_S12 % 4 == 0); @@ -5731,6 +6065,7 @@ pub unsafe fn __msa_ld_w(mem_addr: *mut u8) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ld.d, imm_s13 = 0b1111111111111))] #[rustc_legacy_const_generics(1)] 
+#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ld_d(mem_addr: *mut u8) -> v2i64 { static_assert_simm_bits!(IMM_S13, 13); static_assert!(IMM_S13 % 8 == 0); @@ -5747,6 +6082,7 @@ pub unsafe fn __msa_ld_d(mem_addr: *mut u8) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ldi.b, imm_s10 = 0b1111111111))] #[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ldi_b() -> v16i8 { static_assert_simm_bits!(IMM_S10, 10); msa_ldi_b(IMM_S10) @@ -5762,6 +6098,7 @@ pub unsafe fn __msa_ldi_b() -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ldi.h, imm_s10 = 0b1111111111))] #[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ldi_h() -> v8i16 { static_assert_simm_bits!(IMM_S10, 10); msa_ldi_h(IMM_S10) @@ -5777,6 +6114,7 @@ pub unsafe fn __msa_ldi_h() -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ldi.w, imm_s10 = 0b1111111111))] #[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ldi_w() -> v4i32 { static_assert_simm_bits!(IMM_S10, 10); msa_ldi_w(IMM_S10) @@ -5792,6 +6130,7 @@ pub unsafe fn __msa_ldi_w() -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ldi.d, imm_s10 = 0b1111111111))] #[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ldi_d() -> v2i64 { static_assert_simm_bits!(IMM_S10, 10); msa_ldi_d(IMM_S10) @@ -5808,6 +6147,7 @@ pub unsafe fn __msa_ldi_d() -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(madd_q.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_madd_q_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { msa_madd_q_h(a, mem::transmute(b), c) } @@ -5823,6 +6163,7 @@ pub unsafe fn __msa_madd_q_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { 
#[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(madd_q.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_madd_q_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { msa_madd_q_w(a, mem::transmute(b), c) } @@ -5838,6 +6179,7 @@ pub unsafe fn __msa_madd_q_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maddr_q.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maddr_q_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { msa_maddr_q_h(a, mem::transmute(b), c) } @@ -5853,6 +6195,7 @@ pub unsafe fn __msa_maddr_q_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maddr_q.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maddr_q_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { msa_maddr_q_w(a, mem::transmute(b), c) } @@ -5867,6 +6210,7 @@ pub unsafe fn __msa_maddr_q_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maddv.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maddv_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { msa_maddv_b(a, mem::transmute(b), c) } @@ -5881,6 +6225,7 @@ pub unsafe fn __msa_maddv_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maddv.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maddv_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { msa_maddv_h(a, mem::transmute(b), c) } @@ -5895,6 +6240,7 @@ pub unsafe fn __msa_maddv_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maddv.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maddv_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { msa_maddv_w(a, mem::transmute(b), c) } @@ -5909,6 +6255,7 @@ 
pub unsafe fn __msa_maddv_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maddv.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maddv_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { msa_maddv_d(a, mem::transmute(b), c) } @@ -5923,6 +6270,7 @@ pub unsafe fn __msa_maddv_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_a.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_max_a_b(a: v16i8, b: v16i8) -> v16i8 { msa_max_a_b(a, mem::transmute(b)) } @@ -5937,6 +6285,7 @@ pub unsafe fn __msa_max_a_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_a.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_max_a_h(a: v8i16, b: v8i16) -> v8i16 { msa_max_a_h(a, mem::transmute(b)) } @@ -5951,6 +6300,7 @@ pub unsafe fn __msa_max_a_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_a.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_max_a_w(a: v4i32, b: v4i32) -> v4i32 { msa_max_a_w(a, mem::transmute(b)) } @@ -5965,6 +6315,7 @@ pub unsafe fn __msa_max_a_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_a.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_max_a_d(a: v2i64, b: v2i64) -> v2i64 { msa_max_a_d(a, mem::transmute(b)) } @@ -5978,6 +6329,7 @@ pub unsafe fn __msa_max_a_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_s.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_max_s_b(a: v16i8, b: v16i8) -> v16i8 { msa_max_s_b(a, mem::transmute(b)) } @@ -5991,6 +6343,7 @@ pub unsafe fn __msa_max_s_b(a: v16i8, b: v16i8) -> v16i8 { 
#[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_s.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_max_s_h(a: v8i16, b: v8i16) -> v8i16 { msa_max_s_h(a, mem::transmute(b)) } @@ -6004,6 +6357,7 @@ pub unsafe fn __msa_max_s_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_max_s_w(a: v4i32, b: v4i32) -> v4i32 { msa_max_s_w(a, mem::transmute(b)) } @@ -6017,6 +6371,7 @@ pub unsafe fn __msa_max_s_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_max_s_d(a: v2i64, b: v2i64) -> v2i64 { msa_max_s_d(a, mem::transmute(b)) } @@ -6030,6 +6385,7 @@ pub unsafe fn __msa_max_s_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_u.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_max_u_b(a: v16u8, b: v16u8) -> v16u8 { msa_max_u_b(a, mem::transmute(b)) } @@ -6043,6 +6399,7 @@ pub unsafe fn __msa_max_u_b(a: v16u8, b: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_max_u_h(a: v8u16, b: v8u16) -> v8u16 { msa_max_u_h(a, mem::transmute(b)) } @@ -6056,6 +6413,7 @@ pub unsafe fn __msa_max_u_h(a: v8u16, b: v8u16) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_max_u_w(a: v4u32, b: v4u32) -> v4u32 { msa_max_u_w(a, mem::transmute(b)) } @@ -6069,6 +6427,7 @@ pub unsafe fn __msa_max_u_w(a: v4u32, b: v4u32) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(max_u.d))] 
+#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_max_u_d(a: v2u64, b: v2u64) -> v2u64 { msa_max_u_d(a, mem::transmute(b)) } @@ -6083,6 +6442,7 @@ pub unsafe fn __msa_max_u_d(a: v2u64, b: v2u64) -> v2u64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maxi_s.b, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maxi_s_b(a: v16i8) -> v16i8 { static_assert_simm_bits!(IMM_S5, 5); msa_maxi_s_b(a, IMM_S5) @@ -6098,6 +6458,7 @@ pub unsafe fn __msa_maxi_s_b(a: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maxi_s.h, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maxi_s_h(a: v8i16) -> v8i16 { static_assert_simm_bits!(IMM_S5, 5); msa_maxi_s_h(a, IMM_S5) @@ -6113,6 +6474,7 @@ pub unsafe fn __msa_maxi_s_h(a: v8i16) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maxi_s.w, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maxi_s_w(a: v4i32) -> v4i32 { static_assert_simm_bits!(IMM_S5, 5); msa_maxi_s_w(a, IMM_S5) @@ -6128,6 +6490,7 @@ pub unsafe fn __msa_maxi_s_w(a: v4i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maxi_s.d, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maxi_s_d(a: v2i64) -> v2i64 { static_assert_simm_bits!(IMM_S5, 5); msa_maxi_s_d(a, IMM_S5) @@ -6143,6 +6506,7 @@ pub unsafe fn __msa_maxi_s_d(a: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maxi_u.b, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maxi_u_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM5, 5); msa_maxi_u_b(a, IMM5) @@ 
-6158,6 +6522,7 @@ pub unsafe fn __msa_maxi_u_b(a: v16u8) -> v16u8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maxi_u.h, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maxi_u_h(a: v8u16) -> v8u16 { static_assert_uimm_bits!(IMM5, 5); msa_maxi_u_h(a, IMM5) @@ -6173,6 +6538,7 @@ pub unsafe fn __msa_maxi_u_h(a: v8u16) -> v8u16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maxi_u.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maxi_u_w(a: v4u32) -> v4u32 { static_assert_uimm_bits!(IMM5, 5); msa_maxi_u_w(a, IMM5) @@ -6188,6 +6554,7 @@ pub unsafe fn __msa_maxi_u_w(a: v4u32) -> v4u32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(maxi_u.d, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_maxi_u_d(a: v2u64) -> v2u64 { static_assert_uimm_bits!(IMM5, 5); msa_maxi_u_d(a, IMM5) @@ -6203,6 +6570,7 @@ pub unsafe fn __msa_maxi_u_d(a: v2u64) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_a.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_min_a_b(a: v16i8, b: v16i8) -> v16i8 { msa_min_a_b(a, mem::transmute(b)) } @@ -6217,6 +6585,7 @@ pub unsafe fn __msa_min_a_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_a.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_min_a_h(a: v8i16, b: v8i16) -> v8i16 { msa_min_a_h(a, mem::transmute(b)) } @@ -6231,6 +6600,7 @@ pub unsafe fn __msa_min_a_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_a.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_min_a_w(a: v4i32, b: v4i32) -> v4i32 { 
msa_min_a_w(a, mem::transmute(b)) } @@ -6245,6 +6615,7 @@ pub unsafe fn __msa_min_a_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_a.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_min_a_d(a: v2i64, b: v2i64) -> v2i64 { msa_min_a_d(a, mem::transmute(b)) } @@ -6258,6 +6629,7 @@ pub unsafe fn __msa_min_a_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_s.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_min_s_b(a: v16i8, b: v16i8) -> v16i8 { msa_min_s_b(a, mem::transmute(b)) } @@ -6271,6 +6643,7 @@ pub unsafe fn __msa_min_s_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_s.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_min_s_h(a: v8i16, b: v8i16) -> v8i16 { msa_min_s_h(a, mem::transmute(b)) } @@ -6284,6 +6657,7 @@ pub unsafe fn __msa_min_s_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_min_s_w(a: v4i32, b: v4i32) -> v4i32 { msa_min_s_w(a, mem::transmute(b)) } @@ -6297,6 +6671,7 @@ pub unsafe fn __msa_min_s_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_min_s_d(a: v2i64, b: v2i64) -> v2i64 { msa_min_s_d(a, mem::transmute(b)) } @@ -6311,6 +6686,7 @@ pub unsafe fn __msa_min_s_d(a: v2i64, b: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mini_s.b, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mini_s_b(a: v16i8) -> v16i8 { static_assert_simm_bits!(IMM_S5, 5); msa_mini_s_b(a, 
IMM_S5) @@ -6326,6 +6702,7 @@ pub unsafe fn __msa_mini_s_b(a: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mini_s.h, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mini_s_h(a: v8i16) -> v8i16 { static_assert_simm_bits!(IMM_S5, 5); msa_mini_s_h(a, IMM_S5) @@ -6341,6 +6718,7 @@ pub unsafe fn __msa_mini_s_h(a: v8i16) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mini_s.w, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mini_s_w(a: v4i32) -> v4i32 { static_assert_simm_bits!(IMM_S5, 5); msa_mini_s_w(a, IMM_S5) @@ -6356,6 +6734,7 @@ pub unsafe fn __msa_mini_s_w(a: v4i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mini_s.d, imm_s5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mini_s_d(a: v2i64) -> v2i64 { static_assert_simm_bits!(IMM_S5, 5); msa_mini_s_d(a, IMM_S5) @@ -6370,6 +6749,7 @@ pub unsafe fn __msa_mini_s_d(a: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_u.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_min_u_b(a: v16u8, b: v16u8) -> v16u8 { msa_min_u_b(a, mem::transmute(b)) } @@ -6383,6 +6763,7 @@ pub unsafe fn __msa_min_u_b(a: v16u8, b: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_min_u_h(a: v8u16, b: v8u16) -> v8u16 { msa_min_u_h(a, mem::transmute(b)) } @@ -6396,6 +6777,7 @@ pub unsafe fn __msa_min_u_h(a: v8u16, b: v8u16) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_min_u_w(a: v4u32, 
b: v4u32) -> v4u32 { msa_min_u_w(a, mem::transmute(b)) } @@ -6409,6 +6791,7 @@ pub unsafe fn __msa_min_u_w(a: v4u32, b: v4u32) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(min_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_min_u_d(a: v2u64, b: v2u64) -> v2u64 { msa_min_u_d(a, mem::transmute(b)) } @@ -6423,6 +6806,7 @@ pub unsafe fn __msa_min_u_d(a: v2u64, b: v2u64) -> v2u64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mini_u.b, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mini_u_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM5, 5); msa_mini_u_b(a, IMM5) @@ -6438,6 +6822,7 @@ pub unsafe fn __msa_mini_u_b(a: v16u8) -> v16u8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mini_u.h, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mini_u_h(a: v8u16) -> v8u16 { static_assert_uimm_bits!(IMM5, 5); msa_mini_u_h(a, IMM5) @@ -6453,6 +6838,7 @@ pub unsafe fn __msa_mini_u_h(a: v8u16) -> v8u16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mini_u.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mini_u_w(a: v4u32) -> v4u32 { static_assert_uimm_bits!(IMM5, 5); msa_mini_u_w(a, IMM5) @@ -6468,6 +6854,7 @@ pub unsafe fn __msa_mini_u_w(a: v4u32) -> v4u32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mini_u.d, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mini_u_d(a: v2u64) -> v2u64 { static_assert_uimm_bits!(IMM5, 5); msa_mini_u_d(a, IMM5) @@ -6484,6 +6871,7 @@ pub unsafe fn __msa_mini_u_d(a: v2u64) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mod_s.b))] 
+#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mod_s_b(a: v16i8, b: v16i8) -> v16i8 { msa_mod_s_b(a, mem::transmute(b)) } @@ -6499,6 +6887,7 @@ pub unsafe fn __msa_mod_s_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mod_s.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mod_s_h(a: v8i16, b: v8i16) -> v8i16 { msa_mod_s_h(a, mem::transmute(b)) } @@ -6514,6 +6903,7 @@ pub unsafe fn __msa_mod_s_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mod_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mod_s_w(a: v4i32, b: v4i32) -> v4i32 { msa_mod_s_w(a, mem::transmute(b)) } @@ -6529,6 +6919,7 @@ pub unsafe fn __msa_mod_s_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mod_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mod_s_d(a: v2i64, b: v2i64) -> v2i64 { msa_mod_s_d(a, mem::transmute(b)) } @@ -6544,6 +6935,7 @@ pub unsafe fn __msa_mod_s_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mod_u.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mod_u_b(a: v16u8, b: v16u8) -> v16u8 { msa_mod_u_b(a, mem::transmute(b)) } @@ -6559,6 +6951,7 @@ pub unsafe fn __msa_mod_u_b(a: v16u8, b: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mod_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mod_u_h(a: v8u16, b: v8u16) -> v8u16 { msa_mod_u_h(a, mem::transmute(b)) } @@ -6574,6 +6967,7 @@ pub unsafe fn __msa_mod_u_h(a: v8u16, b: v8u16) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mod_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mod_u_w(a: 
v4u32, b: v4u32) -> v4u32 { msa_mod_u_w(a, mem::transmute(b)) } @@ -6589,6 +6983,7 @@ pub unsafe fn __msa_mod_u_w(a: v4u32, b: v4u32) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mod_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mod_u_d(a: v2u64, b: v2u64) -> v2u64 { msa_mod_u_d(a, mem::transmute(b)) } @@ -6601,6 +6996,7 @@ pub unsafe fn __msa_mod_u_d(a: v2u64, b: v2u64) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(move.v))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_move_v(a: v16i8) -> v16i8 { msa_move_v(a) } @@ -6617,6 +7013,7 @@ pub unsafe fn __msa_move_v(a: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(msub_q.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_msub_q_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { msa_msub_q_h(a, mem::transmute(b), c) } @@ -6633,6 +7030,7 @@ pub unsafe fn __msa_msub_q_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(msub_q.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_msub_q_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { msa_msub_q_w(a, mem::transmute(b), c) } @@ -6649,6 +7047,7 @@ pub unsafe fn __msa_msub_q_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(msubr_q.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_msubr_q_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { msa_msubr_q_h(a, mem::transmute(b), c) } @@ -6665,6 +7064,7 @@ pub unsafe fn __msa_msubr_q_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(msubr_q.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_msubr_q_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { 
msa_msubr_q_w(a, mem::transmute(b), c) } @@ -6679,6 +7079,7 @@ pub unsafe fn __msa_msubr_q_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(msubv.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_msubv_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { msa_msubv_b(a, mem::transmute(b), c) } @@ -6693,6 +7094,7 @@ pub unsafe fn __msa_msubv_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(msubv.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_msubv_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { msa_msubv_h(a, mem::transmute(b), c) } @@ -6707,6 +7109,7 @@ pub unsafe fn __msa_msubv_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(msubv.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_msubv_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { msa_msubv_w(a, mem::transmute(b), c) } @@ -6721,6 +7124,7 @@ pub unsafe fn __msa_msubv_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(msubv.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_msubv_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { msa_msubv_d(a, mem::transmute(b), c) } @@ -6734,6 +7138,7 @@ pub unsafe fn __msa_msubv_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mul_q.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mul_q_h(a: v8i16, b: v8i16) -> v8i16 { msa_mul_q_h(a, mem::transmute(b)) } @@ -6747,6 +7152,7 @@ pub unsafe fn __msa_mul_q_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mul_q.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mul_q_w(a: v4i32, b: 
v4i32) -> v4i32 { msa_mul_q_w(a, mem::transmute(b)) } @@ -6760,6 +7166,7 @@ pub unsafe fn __msa_mul_q_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mulr_q.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mulr_q_h(a: v8i16, b: v8i16) -> v8i16 { msa_mulr_q_h(a, mem::transmute(b)) } @@ -6773,6 +7180,7 @@ pub unsafe fn __msa_mulr_q_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mulr_q.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mulr_q_w(a: v4i32, b: v4i32) -> v4i32 { msa_mulr_q_w(a, mem::transmute(b)) } @@ -6787,6 +7195,7 @@ pub unsafe fn __msa_mulr_q_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mulv.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mulv_b(a: v16i8, b: v16i8) -> v16i8 { msa_mulv_b(a, mem::transmute(b)) } @@ -6801,6 +7210,7 @@ pub unsafe fn __msa_mulv_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mulv.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mulv_h(a: v8i16, b: v8i16) -> v8i16 { msa_mulv_h(a, mem::transmute(b)) } @@ -6815,6 +7225,7 @@ pub unsafe fn __msa_mulv_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mulv.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mulv_w(a: v4i32, b: v4i32) -> v4i32 { msa_mulv_w(a, mem::transmute(b)) } @@ -6829,6 +7240,7 @@ pub unsafe fn __msa_mulv_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(mulv.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_mulv_d(a: v2i64, b: v2i64) -> v2i64 { msa_mulv_d(a, mem::transmute(b)) } @@ -6841,6 +7253,7 @@ pub unsafe fn 
__msa_mulv_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nloc.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_nloc_b(a: v16i8) -> v16i8 { msa_nloc_b(a) } @@ -6853,6 +7266,7 @@ pub unsafe fn __msa_nloc_b(a: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nloc.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_nloc_h(a: v8i16) -> v8i16 { msa_nloc_h(a) } @@ -6865,6 +7279,7 @@ pub unsafe fn __msa_nloc_h(a: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nloc.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_nloc_w(a: v4i32) -> v4i32 { msa_nloc_w(a) } @@ -6877,6 +7292,7 @@ pub unsafe fn __msa_nloc_w(a: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nloc.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_nloc_d(a: v2i64) -> v2i64 { msa_nloc_d(a) } @@ -6889,6 +7305,7 @@ pub unsafe fn __msa_nloc_d(a: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nlzc.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_nlzc_b(a: v16i8) -> v16i8 { msa_nlzc_b(a) } @@ -6901,6 +7318,7 @@ pub unsafe fn __msa_nlzc_b(a: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nlzc.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_nlzc_h(a: v8i16) -> v8i16 { msa_nlzc_h(a) } @@ -6913,6 +7331,7 @@ pub unsafe fn __msa_nlzc_h(a: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nlzc.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_nlzc_w(a: v4i32) -> v4i32 { msa_nlzc_w(a) } @@ -6925,6 +7344,7 @@ pub unsafe fn __msa_nlzc_w(a: v4i32) -> v4i32 { #[inline] 
#[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nlzc.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_nlzc_d(a: v2i64) -> v2i64 { msa_nlzc_d(a) } @@ -6939,6 +7359,7 @@ pub unsafe fn __msa_nlzc_d(a: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nor.v))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_nor_v(a: v16u8, b: v16u8) -> v16u8 { msa_nor_v(a, mem::transmute(b)) } @@ -6954,6 +7375,7 @@ pub unsafe fn __msa_nor_v(a: v16u8, b: v16u8) -> v16u8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(nori.b, imm8 = 0b11111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_nori_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM8, 8); msa_nori_b(a, IMM8) @@ -6969,6 +7391,7 @@ pub unsafe fn __msa_nori_b(a: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(or.v))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_or_v(a: v16u8, b: v16u8) -> v16u8 { msa_or_v(a, mem::transmute(b)) } @@ -6984,6 +7407,7 @@ pub unsafe fn __msa_or_v(a: v16u8, b: v16u8) -> v16u8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(ori.b, imm8 = 0b11111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_ori_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM8, 8); msa_ori_b(a, IMM8) @@ -6998,6 +7422,7 @@ pub unsafe fn __msa_ori_b(a: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pckev.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_pckev_b(a: v16i8, b: v16i8) -> v16i8 { msa_pckev_b(a, mem::transmute(b)) } @@ -7011,6 +7436,7 @@ pub unsafe fn __msa_pckev_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pckev.h))] 
+#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_pckev_h(a: v8i16, b: v8i16) -> v8i16 { msa_pckev_h(a, mem::transmute(b)) } @@ -7024,6 +7450,7 @@ pub unsafe fn __msa_pckev_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pckev.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_pckev_w(a: v4i32, b: v4i32) -> v4i32 { msa_pckev_w(a, mem::transmute(b)) } @@ -7037,6 +7464,7 @@ pub unsafe fn __msa_pckev_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pckev.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_pckev_d(a: v2i64, b: v2i64) -> v2i64 { msa_pckev_d(a, mem::transmute(b)) } @@ -7050,6 +7478,7 @@ pub unsafe fn __msa_pckev_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pckod.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_pckod_b(a: v16i8, b: v16i8) -> v16i8 { msa_pckod_b(a, mem::transmute(b)) } @@ -7063,6 +7492,7 @@ pub unsafe fn __msa_pckod_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pckod.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_pckod_h(a: v8i16, b: v8i16) -> v8i16 { msa_pckod_h(a, mem::transmute(b)) } @@ -7076,6 +7506,7 @@ pub unsafe fn __msa_pckod_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pckod.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_pckod_w(a: v4i32, b: v4i32) -> v4i32 { msa_pckod_w(a, mem::transmute(b)) } @@ -7089,6 +7520,7 @@ pub unsafe fn __msa_pckod_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pckod.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_pckod_d(a: 
v2i64, b: v2i64) -> v2i64 { msa_pckod_d(a, mem::transmute(b)) } @@ -7101,6 +7533,7 @@ pub unsafe fn __msa_pckod_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pcnt.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_pcnt_b(a: v16i8) -> v16i8 { msa_pcnt_b(a) } @@ -7113,6 +7546,7 @@ pub unsafe fn __msa_pcnt_b(a: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pcnt.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_pcnt_h(a: v8i16) -> v8i16 { msa_pcnt_h(a) } @@ -7125,6 +7559,7 @@ pub unsafe fn __msa_pcnt_h(a: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pcnt.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_pcnt_w(a: v4i32) -> v4i32 { msa_pcnt_w(a) } @@ -7137,6 +7572,7 @@ pub unsafe fn __msa_pcnt_w(a: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(pcnt.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_pcnt_d(a: v2i64) -> v2i64 { msa_pcnt_d(a) } @@ -7151,6 +7587,7 @@ pub unsafe fn __msa_pcnt_d(a: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sat_s.b, imm4 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sat_s_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM3, 3); msa_sat_s_b(a, IMM3) @@ -7166,6 +7603,7 @@ pub unsafe fn __msa_sat_s_b(a: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sat_s.h, imm3 = 0b1111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sat_s_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM4, 4); msa_sat_s_h(a, IMM4) @@ -7181,6 +7619,7 @@ pub unsafe fn __msa_sat_s_h(a: v8i16) -> v8i16 { #[target_feature(enable = "msa")] 
#[cfg_attr(test, assert_instr(sat_s.w, imm2 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sat_s_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); msa_sat_s_w(a, IMM5) @@ -7196,6 +7635,7 @@ pub unsafe fn __msa_sat_s_w(a: v4i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sat_s.d, imm1 = 0b111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sat_s_d(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM6, 6); msa_sat_s_d(a, IMM6) @@ -7211,6 +7651,7 @@ pub unsafe fn __msa_sat_s_d(a: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sat_u.b, imm4 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sat_u_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM3, 3); msa_sat_u_b(a, IMM3) @@ -7226,6 +7667,7 @@ pub unsafe fn __msa_sat_u_b(a: v16u8) -> v16u8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sat_u.h, imm3 = 0b1111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sat_u_h(a: v8u16) -> v8u16 { static_assert_uimm_bits!(IMM4, 4); msa_sat_u_h(a, IMM4) @@ -7241,6 +7683,7 @@ pub unsafe fn __msa_sat_u_h(a: v8u16) -> v8u16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sat_u.w, imm2 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sat_u_w(a: v4u32) -> v4u32 { static_assert_uimm_bits!(IMM5, 5); msa_sat_u_w(a, IMM5) @@ -7256,6 +7699,7 @@ pub unsafe fn __msa_sat_u_w(a: v4u32) -> v4u32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sat_u.d, imm1 = 0b111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sat_u_d(a: v2u64) -> v2u64 { 
static_assert_uimm_bits!(IMM6, 6); msa_sat_u_d(a, IMM6) @@ -7272,6 +7716,7 @@ pub unsafe fn __msa_sat_u_d(a: v2u64) -> v2u64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(shf.b, imm8 = 0b11111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_shf_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM8, 8); msa_shf_b(a, IMM8) @@ -7288,6 +7733,7 @@ pub unsafe fn __msa_shf_b(a: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(shf.h, imm8 = 0b11111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_shf_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM8, 8); msa_shf_h(a, IMM8) @@ -7304,6 +7750,7 @@ pub unsafe fn __msa_shf_h(a: v8i16) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(shf.w, imm8 = 0b11111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_shf_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM8, 8); msa_shf_w(a, IMM8) @@ -7325,6 +7772,7 @@ pub unsafe fn __msa_shf_w(a: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sld.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sld_b(a: v16i8, b: v16i8, c: i32) -> v16i8 { msa_sld_b(a, mem::transmute(b), c) } @@ -7345,6 +7793,7 @@ pub unsafe fn __msa_sld_b(a: v16i8, b: v16i8, c: i32) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sld.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sld_h(a: v8i16, b: v8i16, c: i32) -> v8i16 { msa_sld_h(a, mem::transmute(b), c) } @@ -7365,6 +7814,7 @@ pub unsafe fn __msa_sld_h(a: v8i16, b: v8i16, c: i32) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sld.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn 
__msa_sld_w(a: v4i32, b: v4i32, c: i32) -> v4i32 { msa_sld_w(a, mem::transmute(b), c) } @@ -7385,6 +7835,7 @@ pub unsafe fn __msa_sld_w(a: v4i32, b: v4i32, c: i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sld.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sld_d(a: v2i64, b: v2i64, c: i32) -> v2i64 { msa_sld_d(a, mem::transmute(b), c) } @@ -7404,6 +7855,7 @@ pub unsafe fn __msa_sld_d(a: v2i64, b: v2i64, c: i32) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sldi.b, imm4 = 0b1111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sldi_b(a: v16i8, b: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM4, 4); msa_sldi_b(a, mem::transmute(b), IMM4) @@ -7424,6 +7876,7 @@ pub unsafe fn __msa_sldi_b(a: v16i8, b: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sldi.h, imm3 = 0b111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sldi_h(a: v8i16, b: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM3, 3); msa_sldi_h(a, mem::transmute(b), IMM3) @@ -7444,6 +7897,7 @@ pub unsafe fn __msa_sldi_h(a: v8i16, b: v8i16) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sldi.w, imm2 = 0b11))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sldi_w(a: v4i32, b: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM2, 2); msa_sldi_w(a, mem::transmute(b), IMM2) @@ -7464,6 +7918,7 @@ pub unsafe fn __msa_sldi_w(a: v4i32, b: v4i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sldi.d, imm1 = 0b1))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sldi_d(a: v2i64, b: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM1, 1); msa_sldi_d(a, mem::transmute(b), IMM1) @@ -7479,6 
+7934,7 @@ pub unsafe fn __msa_sldi_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sll.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sll_b(a: v16i8, b: v16i8) -> v16i8 { msa_sll_b(a, mem::transmute(b)) } @@ -7493,6 +7949,7 @@ pub unsafe fn __msa_sll_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sll.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sll_h(a: v8i16, b: v8i16) -> v8i16 { msa_sll_h(a, mem::transmute(b)) } @@ -7507,6 +7964,7 @@ pub unsafe fn __msa_sll_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sll.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sll_w(a: v4i32, b: v4i32) -> v4i32 { msa_sll_w(a, mem::transmute(b)) } @@ -7521,6 +7979,7 @@ pub unsafe fn __msa_sll_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sll.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sll_d(a: v2i64, b: v2i64) -> v2i64 { msa_sll_d(a, mem::transmute(b)) } @@ -7535,6 +7994,7 @@ pub unsafe fn __msa_sll_d(a: v2i64, b: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(slli.b, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_slli_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM4, 4); msa_slli_b(a, IMM4) @@ -7550,6 +8010,7 @@ pub unsafe fn __msa_slli_b(a: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(slli.h, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_slli_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM3, 3); msa_slli_h(a, IMM3) @@ -7565,6 +8026,7 @@ pub unsafe fn __msa_slli_h(a: 
v8i16) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(slli.w, imm2 = 0b11))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_slli_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM2, 2); msa_slli_w(a, IMM2) @@ -7580,6 +8042,7 @@ pub unsafe fn __msa_slli_w(a: v4i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(slli.d, imm1 = 0b1))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_slli_d(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM1, 1); msa_slli_d(a, IMM1) @@ -7595,6 +8058,7 @@ pub unsafe fn __msa_slli_d(a: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(splat.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_splat_b(a: v16i8, b: i32) -> v16i8 { msa_splat_b(a, mem::transmute(b)) } @@ -7609,6 +8073,7 @@ pub unsafe fn __msa_splat_b(a: v16i8, b: i32) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(splat.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_splat_h(a: v8i16, b: i32) -> v8i16 { msa_splat_h(a, mem::transmute(b)) } @@ -7623,6 +8088,7 @@ pub unsafe fn __msa_splat_h(a: v8i16, b: i32) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(splat.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_splat_w(a: v4i32, b: i32) -> v4i32 { msa_splat_w(a, mem::transmute(b)) } @@ -7637,6 +8103,7 @@ pub unsafe fn __msa_splat_w(a: v4i32, b: i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(splat.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_splat_d(a: v2i64, b: i32) -> v2i64 { msa_splat_d(a, mem::transmute(b)) } @@ -7650,6 +8117,7 @@ pub unsafe fn __msa_splat_d(a: v2i64, b: i32) -> v2i64 { #[target_feature(enable 
= "msa")] #[cfg_attr(test, assert_instr(splati.b, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_splati_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM4, 4); msa_splati_b(a, IMM4) @@ -7664,6 +8132,7 @@ pub unsafe fn __msa_splati_b(a: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(splati.h, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_splati_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM3, 3); msa_splati_h(a, IMM3) @@ -7678,6 +8147,7 @@ pub unsafe fn __msa_splati_h(a: v8i16) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(splati.w, imm2 = 0b11))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_splati_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM2, 2); msa_splati_w(a, IMM2) @@ -7692,6 +8162,7 @@ pub unsafe fn __msa_splati_w(a: v4i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(splati.d, imm1 = 0b1))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_splati_d(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM1, 1); msa_splati_d(a, IMM1) @@ -7707,6 +8178,7 @@ pub unsafe fn __msa_splati_d(a: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sra.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sra_b(a: v16i8, b: v16i8) -> v16i8 { msa_sra_b(a, mem::transmute(b)) } @@ -7721,6 +8193,7 @@ pub unsafe fn __msa_sra_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sra.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sra_h(a: v8i16, b: v8i16) -> v8i16 { msa_sra_h(a, mem::transmute(b)) } @@ -7735,6 +8208,7 @@ pub unsafe fn 
__msa_sra_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sra.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sra_w(a: v4i32, b: v4i32) -> v4i32 { msa_sra_w(a, mem::transmute(b)) } @@ -7749,6 +8223,7 @@ pub unsafe fn __msa_sra_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(sra.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_sra_d(a: v2i64, b: v2i64) -> v2i64 { msa_sra_d(a, mem::transmute(b)) } @@ -7763,6 +8238,7 @@ pub unsafe fn __msa_sra_d(a: v2i64, b: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srai.b, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srai_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM3, 3); msa_srai_b(a, IMM3) @@ -7778,6 +8254,7 @@ pub unsafe fn __msa_srai_b(a: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srai.h, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srai_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM4, 4); msa_srai_h(a, IMM4) @@ -7793,6 +8270,7 @@ pub unsafe fn __msa_srai_h(a: v8i16) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srai.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srai_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); msa_srai_w(a, IMM5) @@ -7808,6 +8286,7 @@ pub unsafe fn __msa_srai_w(a: v4i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srai.d, imm6 = 0b111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srai_d(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM6, 6); 
msa_srai_d(a, IMM6) @@ -7824,6 +8303,7 @@ pub unsafe fn __msa_srai_d(a: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srar.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srar_b(a: v16i8, b: v16i8) -> v16i8 { msa_srar_b(a, mem::transmute(b)) } @@ -7839,6 +8319,7 @@ pub unsafe fn __msa_srar_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srar.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srar_h(a: v8i16, b: v8i16) -> v8i16 { msa_srar_h(a, mem::transmute(b)) } @@ -7854,6 +8335,7 @@ pub unsafe fn __msa_srar_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srar.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srar_w(a: v4i32, b: v4i32) -> v4i32 { msa_srar_w(a, mem::transmute(b)) } @@ -7869,6 +8351,7 @@ pub unsafe fn __msa_srar_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srar.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srar_d(a: v2i64, b: v2i64) -> v2i64 { msa_srar_d(a, mem::transmute(b)) } @@ -7884,6 +8367,7 @@ pub unsafe fn __msa_srar_d(a: v2i64, b: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srari.b, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srari_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM3, 3); msa_srari_b(a, IMM3) @@ -7900,6 +8384,7 @@ pub unsafe fn __msa_srari_b(a: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srari.h, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srari_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM4, 4); msa_srari_h(a, IMM4) @@ -7916,6 
+8401,7 @@ pub unsafe fn __msa_srari_h(a: v8i16) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srari.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srari_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); msa_srari_w(a, IMM5) @@ -7932,6 +8418,7 @@ pub unsafe fn __msa_srari_w(a: v4i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srari.d, imm6 = 0b111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srari_d(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM6, 6); msa_srari_d(a, IMM6) @@ -7947,6 +8434,7 @@ pub unsafe fn __msa_srari_d(a: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srl.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srl_b(a: v16i8, b: v16i8) -> v16i8 { msa_srl_b(a, mem::transmute(b)) } @@ -7961,6 +8449,7 @@ pub unsafe fn __msa_srl_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srl.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srl_h(a: v8i16, b: v8i16) -> v8i16 { msa_srl_h(a, mem::transmute(b)) } @@ -7975,6 +8464,7 @@ pub unsafe fn __msa_srl_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srl.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srl_w(a: v4i32, b: v4i32) -> v4i32 { msa_srl_w(a, mem::transmute(b)) } @@ -7989,6 +8479,7 @@ pub unsafe fn __msa_srl_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srl.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srl_d(a: v2i64, b: v2i64) -> v2i64 { msa_srl_d(a, mem::transmute(b)) } @@ -8003,6 +8494,7 @@ pub unsafe fn __msa_srl_d(a: v2i64, b: 
v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srli.b, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srli_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM4, 4); msa_srli_b(a, IMM4) @@ -8018,6 +8510,7 @@ pub unsafe fn __msa_srli_b(a: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srli.h, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srli_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM3, 3); msa_srli_h(a, IMM3) @@ -8033,6 +8526,7 @@ pub unsafe fn __msa_srli_h(a: v8i16) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srli.w, imm2 = 0b11))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srli_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM2, 2); msa_srli_w(a, IMM2) @@ -8048,6 +8542,7 @@ pub unsafe fn __msa_srli_w(a: v4i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srli.d, imm1 = 0b1))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srli_d(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM1, 1); msa_srli_d(a, IMM1) @@ -8064,6 +8559,7 @@ pub unsafe fn __msa_srli_d(a: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srlr.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srlr_b(a: v16i8, b: v16i8) -> v16i8 { msa_srlr_b(a, mem::transmute(b)) } @@ -8079,6 +8575,7 @@ pub unsafe fn __msa_srlr_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srlr.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srlr_h(a: v8i16, b: v8i16) -> v8i16 { msa_srlr_h(a, mem::transmute(b)) } @@ -8094,6 +8591,7 @@ pub 
unsafe fn __msa_srlr_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srlr.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srlr_w(a: v4i32, b: v4i32) -> v4i32 { msa_srlr_w(a, mem::transmute(b)) } @@ -8109,6 +8607,7 @@ pub unsafe fn __msa_srlr_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srlr.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srlr_d(a: v2i64, b: v2i64) -> v2i64 { msa_srlr_d(a, mem::transmute(b)) } @@ -8124,6 +8623,7 @@ pub unsafe fn __msa_srlr_d(a: v2i64, b: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srlri.b, imm3 = 0b111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srlri_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM3, 3); msa_srlri_b(a, IMM3) @@ -8140,6 +8640,7 @@ pub unsafe fn __msa_srlri_b(a: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srlri.h, imm4 = 0b1111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srlri_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM4, 4); msa_srlri_h(a, IMM4) @@ -8156,6 +8657,7 @@ pub unsafe fn __msa_srlri_h(a: v8i16) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srlri.w, imm5 = 0b11111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srlri_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); msa_srlri_w(a, IMM5) @@ -8172,6 +8674,7 @@ pub unsafe fn __msa_srlri_w(a: v4i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(srlri.d, imm6 = 0b111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_srlri_d(a: v2i64) -> v2i64 { 
static_assert_uimm_bits!(IMM6, 6); msa_srlri_d(a, IMM6) @@ -8187,6 +8690,7 @@ pub unsafe fn __msa_srlri_d(a: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(st.b, imm_s10 = 0b1111111111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_st_b(a: v16i8, mem_addr: *mut u8) -> () { static_assert_simm_bits!(IMM_S10, 10); msa_st_b(a, mem_addr, IMM_S10) @@ -8202,6 +8706,7 @@ pub unsafe fn __msa_st_b(a: v16i8, mem_addr: *mut u8) -> () #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(st.h, imm_s11 = 0b11111111111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_st_h(a: v8i16, mem_addr: *mut u8) -> () { static_assert_simm_bits!(IMM_S11, 11); static_assert!(IMM_S11 % 2 == 0); @@ -8218,6 +8723,7 @@ pub unsafe fn __msa_st_h(a: v8i16, mem_addr: *mut u8) -> () #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(st.w, imm_s12 = 0b111111111111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_st_w(a: v4i32, mem_addr: *mut u8) -> () { static_assert_simm_bits!(IMM_S12, 12); static_assert!(IMM_S12 % 4 == 0); @@ -8234,6 +8740,7 @@ pub unsafe fn __msa_st_w(a: v4i32, mem_addr: *mut u8) -> () #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(st.d, imm_s13 = 0b1111111111111))] #[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_st_d(a: v2i64, mem_addr: *mut u8) -> () { static_assert_simm_bits!(IMM_S13, 13); static_assert!(IMM_S13 % 8 == 0); @@ -8250,6 +8757,7 @@ pub unsafe fn __msa_st_d(a: v2i64, mem_addr: *mut u8) -> () #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subs_s.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subs_s_b(a: v16i8, b: v16i8) -> v16i8 { msa_subs_s_b(a, mem::transmute(b)) } @@ -8264,6 
+8772,7 @@ pub unsafe fn __msa_subs_s_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subs_s.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subs_s_h(a: v8i16, b: v8i16) -> v8i16 { msa_subs_s_h(a, mem::transmute(b)) } @@ -8278,6 +8787,7 @@ pub unsafe fn __msa_subs_s_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subs_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subs_s_w(a: v4i32, b: v4i32) -> v4i32 { msa_subs_s_w(a, mem::transmute(b)) } @@ -8292,6 +8802,7 @@ pub unsafe fn __msa_subs_s_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subs_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subs_s_d(a: v2i64, b: v2i64) -> v2i64 { msa_subs_s_d(a, mem::transmute(b)) } @@ -8306,6 +8817,7 @@ pub unsafe fn __msa_subs_s_d(a: v2i64, b: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subs_u.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subs_u_b(a: v16u8, b: v16u8) -> v16u8 { msa_subs_u_b(a, mem::transmute(b)) } @@ -8320,6 +8832,7 @@ pub unsafe fn __msa_subs_u_b(a: v16u8, b: v16u8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subs_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subs_u_h(a: v8u16, b: v8u16) -> v8u16 { msa_subs_u_h(a, mem::transmute(b)) } @@ -8334,6 +8847,7 @@ pub unsafe fn __msa_subs_u_h(a: v8u16, b: v8u16) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subs_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subs_u_w(a: v4u32, b: v4u32) -> v4u32 { msa_subs_u_w(a, mem::transmute(b)) } @@ -8348,6 +8862,7 @@ pub unsafe fn __msa_subs_u_w(a: v4u32, b: v4u32) -> 
v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subs_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subs_u_d(a: v2u64, b: v2u64) -> v2u64 { msa_subs_u_d(a, mem::transmute(b)) } @@ -8362,6 +8877,7 @@ pub unsafe fn __msa_subs_u_d(a: v2u64, b: v2u64) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subsus_u.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subsus_u_b(a: v16u8, b: v16i8) -> v16u8 { msa_subsus_u_b(a, mem::transmute(b)) } @@ -8376,6 +8892,7 @@ pub unsafe fn __msa_subsus_u_b(a: v16u8, b: v16i8) -> v16u8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subsus_u.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subsus_u_h(a: v8u16, b: v8i16) -> v8u16 { msa_subsus_u_h(a, mem::transmute(b)) } @@ -8390,6 +8907,7 @@ pub unsafe fn __msa_subsus_u_h(a: v8u16, b: v8i16) -> v8u16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subsus_u.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subsus_u_w(a: v4u32, b: v4i32) -> v4u32 { msa_subsus_u_w(a, mem::transmute(b)) } @@ -8404,6 +8922,7 @@ pub unsafe fn __msa_subsus_u_w(a: v4u32, b: v4i32) -> v4u32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subsus_u.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subsus_u_d(a: v2u64, b: v2i64) -> v2u64 { msa_subsus_u_d(a, mem::transmute(b)) } @@ -8418,6 +8937,7 @@ pub unsafe fn __msa_subsus_u_d(a: v2u64, b: v2i64) -> v2u64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subsuu_s.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subsuu_s_b(a: v16u8, b: v16u8) -> v16i8 { msa_subsuu_s_b(a, mem::transmute(b)) } @@ -8432,6 +8952,7 @@ pub unsafe fn __msa_subsuu_s_b(a: v16u8, b: v16u8) -> v16i8 { #[inline] 
#[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subsuu_s.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subsuu_s_h(a: v8u16, b: v8u16) -> v8i16 { msa_subsuu_s_h(a, mem::transmute(b)) } @@ -8446,6 +8967,7 @@ pub unsafe fn __msa_subsuu_s_h(a: v8u16, b: v8u16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subsuu_s.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subsuu_s_w(a: v4u32, b: v4u32) -> v4i32 { msa_subsuu_s_w(a, mem::transmute(b)) } @@ -8460,6 +8982,7 @@ pub unsafe fn __msa_subsuu_s_w(a: v4u32, b: v4u32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subsuu_s.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subsuu_s_d(a: v2u64, b: v2u64) -> v2i64 { msa_subsuu_s_d(a, mem::transmute(b)) } @@ -8473,6 +8996,7 @@ pub unsafe fn __msa_subsuu_s_d(a: v2u64, b: v2u64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subv.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subv_b(a: v16i8, b: v16i8) -> v16i8 { msa_subv_b(a, mem::transmute(b)) } @@ -8486,6 +9010,7 @@ pub unsafe fn __msa_subv_b(a: v16i8, b: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subv.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subv_h(a: v8i16, b: v8i16) -> v8i16 { msa_subv_h(a, mem::transmute(b)) } @@ -8499,6 +9024,7 @@ pub unsafe fn __msa_subv_h(a: v8i16, b: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subv.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subv_w(a: v4i32, b: v4i32) -> v4i32 { msa_subv_w(a, mem::transmute(b)) } @@ -8512,6 +9038,7 @@ pub unsafe fn __msa_subv_w(a: v4i32, b: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, 
assert_instr(subv.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subv_d(a: v2i64, b: v2i64) -> v2i64 { msa_subv_d(a, mem::transmute(b)) } @@ -8526,6 +9053,7 @@ pub unsafe fn __msa_subv_d(a: v2i64, b: v2i64) -> v2i64 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subvi.b, imm5 = 0b10111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subvi_b(a: v16i8) -> v16i8 { static_assert_uimm_bits!(IMM5, 5); msa_subvi_b(a, IMM5) @@ -8541,6 +9069,7 @@ pub unsafe fn __msa_subvi_b(a: v16i8) -> v16i8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subvi.h, imm5 = 0b10111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subvi_h(a: v8i16) -> v8i16 { static_assert_uimm_bits!(IMM5, 5); msa_subvi_h(a, IMM5) @@ -8556,6 +9085,7 @@ pub unsafe fn __msa_subvi_h(a: v8i16) -> v8i16 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subvi.w, imm5 = 0b10111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subvi_w(a: v4i32) -> v4i32 { static_assert_uimm_bits!(IMM5, 5); msa_subvi_w(a, IMM5) @@ -8571,6 +9101,7 @@ pub unsafe fn __msa_subvi_w(a: v4i32) -> v4i32 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(subvi.d, imm5 = 0b10111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_subvi_d(a: v2i64) -> v2i64 { static_assert_uimm_bits!(IMM5, 5); msa_subvi_d(a, IMM5) @@ -8589,6 +9120,7 @@ pub unsafe fn __msa_subvi_d(a: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(vshf.b))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_vshf_b(a: v16i8, b: v16i8, c: v16i8) -> v16i8 { msa_vshf_b(a, mem::transmute(b), c) } @@ -8606,6 +9138,7 @@ pub unsafe fn __msa_vshf_b(a: v16i8, b: 
v16i8, c: v16i8) -> v16i8 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(vshf.h))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_vshf_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { msa_vshf_h(a, mem::transmute(b), c) } @@ -8623,6 +9156,7 @@ pub unsafe fn __msa_vshf_h(a: v8i16, b: v8i16, c: v8i16) -> v8i16 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(vshf.w))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_vshf_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { msa_vshf_w(a, mem::transmute(b), c) } @@ -8640,6 +9174,7 @@ pub unsafe fn __msa_vshf_w(a: v4i32, b: v4i32, c: v4i32) -> v4i32 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(vshf.d))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_vshf_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { msa_vshf_d(a, mem::transmute(b), c) } @@ -8654,6 +9189,7 @@ pub unsafe fn __msa_vshf_d(a: v2i64, b: v2i64, c: v2i64) -> v2i64 { #[inline] #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(xor.v))] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_xor_v(a: v16u8, b: v16u8) -> v16u8 { msa_xor_v(a, mem::transmute(b)) } @@ -8669,6 +9205,7 @@ pub unsafe fn __msa_xor_v(a: v16u8, b: v16u8) -> v16u8 { #[target_feature(enable = "msa")] #[cfg_attr(test, assert_instr(xori.b, imm8 = 0b11111111))] #[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_mips", issue = "111198")] pub unsafe fn __msa_xori_b(a: v16u8) -> v16u8 { static_assert_uimm_bits!(IMM8, 8); msa_xori_b(a, IMM8) diff --git a/crates/core_arch/src/mod.rs b/crates/core_arch/src/mod.rs index ad3ec863d4..e72d9abf9c 100644 --- a/crates/core_arch/src/mod.rs +++ b/crates/core_arch/src/mod.rs @@ -43,8 +43,9 @@ pub mod arch { /// See the [module documentation](../index.html) for more details. 
#[cfg(any(target_arch = "arm", doc))] #[doc(cfg(target_arch = "arm"))] - #[unstable(feature = "stdsimd", issue = "27731")] + #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub mod arm { + #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] pub use crate::core_arch::arm::*; } @@ -218,7 +219,7 @@ pub mod arch { /// See the [module documentation](../index.html) for more details. #[cfg(any(target_arch = "mips", doc))] #[doc(cfg(target_arch = "mips"))] - #[unstable(feature = "stdsimd", issue = "27731")] + #[unstable(feature = "stdarch_mips", issue = "111198")] pub mod mips { pub use crate::core_arch::mips::*; } @@ -228,7 +229,7 @@ pub mod arch { /// See the [module documentation](../index.html) for more details. #[cfg(any(target_arch = "mips64", doc))] #[doc(cfg(target_arch = "mips64"))] - #[unstable(feature = "stdsimd", issue = "27731")] + #[unstable(feature = "stdarch_mips", issue = "111198")] pub mod mips64 { pub use crate::core_arch::mips::*; } @@ -238,7 +239,7 @@ pub mod arch { /// See the [module documentation](../index.html) for more details. #[cfg(any(target_arch = "powerpc", doc))] #[doc(cfg(target_arch = "powerpc"))] - #[unstable(feature = "stdsimd", issue = "27731")] + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub mod powerpc { pub use crate::core_arch::powerpc::*; } @@ -248,7 +249,7 @@ pub mod arch { /// See the [module documentation](../index.html) for more details. #[cfg(any(target_arch = "powerpc64", doc))] #[doc(cfg(target_arch = "powerpc64"))] - #[unstable(feature = "stdsimd", issue = "27731")] + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub mod powerpc64 { pub use crate::core_arch::powerpc64::*; } @@ -258,7 +259,7 @@ pub mod arch { /// See the [module documentation](../index.html) for more details. 
#[cfg(any(target_arch = "nvptx64", doc))] #[doc(cfg(target_arch = "nvptx64"))] - #[unstable(feature = "stdsimd", issue = "27731")] + #[unstable(feature = "stdarch_nvptx", issue = "111199")] pub mod nvptx { pub use crate::core_arch::nvptx::*; } diff --git a/crates/core_arch/src/nvptx/mod.rs b/crates/core_arch/src/nvptx/mod.rs index bf6673f478..3df767cc7f 100644 --- a/crates/core_arch/src/nvptx/mod.rs +++ b/crates/core_arch/src/nvptx/mod.rs @@ -45,84 +45,98 @@ extern "C" { /// Synchronizes all threads in the block. #[inline] +#[unstable(feature = "stdarch_nvptx", issue = "111199")] pub unsafe fn _syncthreads() -> () { syncthreads() } /// x-th thread-block dimension. #[inline] +#[unstable(feature = "stdarch_nvptx", issue = "111199")] pub unsafe fn _block_dim_x() -> i32 { block_dim_x() } /// y-th thread-block dimension. #[inline] +#[unstable(feature = "stdarch_nvptx", issue = "111199")] pub unsafe fn _block_dim_y() -> i32 { block_dim_y() } /// z-th thread-block dimension. #[inline] +#[unstable(feature = "stdarch_nvptx", issue = "111199")] pub unsafe fn _block_dim_z() -> i32 { block_dim_z() } /// x-th thread-block index. #[inline] +#[unstable(feature = "stdarch_nvptx", issue = "111199")] pub unsafe fn _block_idx_x() -> i32 { block_idx_x() } /// y-th thread-block index. #[inline] +#[unstable(feature = "stdarch_nvptx", issue = "111199")] pub unsafe fn _block_idx_y() -> i32 { block_idx_y() } /// z-th thread-block index. #[inline] +#[unstable(feature = "stdarch_nvptx", issue = "111199")] pub unsafe fn _block_idx_z() -> i32 { block_idx_z() } /// x-th block-grid dimension. #[inline] +#[unstable(feature = "stdarch_nvptx", issue = "111199")] pub unsafe fn _grid_dim_x() -> i32 { grid_dim_x() } /// y-th block-grid dimension. #[inline] +#[unstable(feature = "stdarch_nvptx", issue = "111199")] pub unsafe fn _grid_dim_y() -> i32 { grid_dim_y() } /// z-th block-grid dimension. 
#[inline] +#[unstable(feature = "stdarch_nvptx", issue = "111199")] pub unsafe fn _grid_dim_z() -> i32 { grid_dim_z() } /// x-th thread index. #[inline] +#[unstable(feature = "stdarch_nvptx", issue = "111199")] pub unsafe fn _thread_idx_x() -> i32 { thread_idx_x() } /// y-th thread index. #[inline] +#[unstable(feature = "stdarch_nvptx", issue = "111199")] pub unsafe fn _thread_idx_y() -> i32 { thread_idx_y() } /// z-th thread index. #[inline] +#[unstable(feature = "stdarch_nvptx", issue = "111199")] pub unsafe fn _thread_idx_z() -> i32 { thread_idx_z() } /// Generates the trap instruction `TRAP` #[inline] +#[unstable(feature = "stdarch_nvptx", issue = "111199")] pub unsafe fn trap() -> ! { crate::intrinsics::abort() } @@ -149,6 +163,7 @@ extern "C" { /// Sources: /// [Programming Guide](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#formatted-output), /// [PTX Interoperability](https://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability/index.html#system-calls). + #[unstable(feature = "stdarch_nvptx", issue = "111199")] pub fn vprintf(format: *const u8, valist: *const c_void) -> i32; /// Allocate memory dynamically from a fixed-size heap in global memory. @@ -168,6 +183,7 @@ extern "C" { /// [Programming Guide](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#dynamic-global-memory-allocation-and-operations), /// [PTX Interoperability](https://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability/index.html#system-calls). // FIXME(denzp): assign `malloc` and `nothrow` attributes. + #[unstable(feature = "stdarch_nvptx", issue = "111199")] pub fn malloc(size: usize) -> *mut c_void; /// Free previously dynamically allocated memory. @@ -184,6 +200,7 @@ extern "C" { /// [Programming Guide](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#dynamic-global-memory-allocation-and-operations), /// [PTX Interoperability](https://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability/index.html#system-calls). 
// FIXME(denzp): assign `nothrow` attribute. + #[unstable(feature = "stdarch_nvptx", issue = "111199")] pub fn free(ptr: *mut c_void); // Internal declaration of the syscall. Exported variant has @@ -208,6 +225,7 @@ extern "C" { /// Source: /// [PTX Interoperability](https://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability/index.html#system-calls). #[inline] +#[unstable(feature = "stdarch_nvptx", issue = "111199")] pub unsafe fn __assert_fail(message: *const u8, file: *const u8, line: u32, function: *const u8) { __assertfail(message, file, line, function, 1) } diff --git a/crates/core_arch/src/powerpc/altivec.rs b/crates/core_arch/src/powerpc/altivec.rs index e94afa77d0..8ab09cfb6f 100644 --- a/crates/core_arch/src/powerpc/altivec.rs +++ b/crates/core_arch/src/powerpc/altivec.rs @@ -24,29 +24,39 @@ use stdarch_test::assert_instr; types! { /// PowerPC-specific 128-bit wide vector of sixteen packed `i8` + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub struct vector_signed_char(i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8); /// PowerPC-specific 128-bit wide vector of sixteen packed `u8` + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub struct vector_unsigned_char(u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8); /// PowerPC-specific 128-bit wide vector mask of sixteen packed elements + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub struct vector_bool_char(i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8); /// PowerPC-specific 128-bit wide vector of eight packed `i16` + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub struct vector_signed_short(i16, i16, i16, i16, i16, i16, i16, i16); /// PowerPC-specific 128-bit wide vector of eight packed `u16` + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub struct vector_unsigned_short(u16, u16, u16, u16, u16, u16, u16, u16); /// PowerPC-specific 128-bit wide vector mask of eight packed elements + 
#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub struct vector_bool_short(i16, i16, i16, i16, i16, i16, i16, i16); // pub struct vector_pixel(???); /// PowerPC-specific 128-bit wide vector of four packed `i32` + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub struct vector_signed_int(i32, i32, i32, i32); /// PowerPC-specific 128-bit wide vector of four packed `u32` + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub struct vector_unsigned_int(u32, u32, u32, u32); /// PowerPC-specific 128-bit wide vector mask of four packed elements + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub struct vector_bool_int(i32, i32, i32, i32); /// PowerPC-specific 128-bit wide vector of four packed `f32` + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub struct vector_float(f32, f32, f32, f32); } @@ -381,6 +391,7 @@ macro_rules! t_t_l { macro_rules! impl_from { ($s: ident) => { + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl From<$s> for s_t_l!($s) { fn from (v: $s) -> Self { unsafe { @@ -400,6 +411,7 @@ impl_from! { i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, f32x4 } macro_rules! impl_neg { ($s: ident : $zero: expr) => { + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl crate::ops::Neg for s_t_l!($s) { type Output = s_t_l!($s); fn neg(self) -> Self::Output { @@ -447,6 +459,7 @@ mod sealed { } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorLd { type Result; unsafe fn vec_ld(self, off: isize) -> Self::Result; @@ -471,6 +484,7 @@ mod sealed { transmute(lvxl(addr)) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorLd for *const $ty { type Result = t_t_l!($ty); #[inline] @@ -498,6 +512,7 @@ mod sealed { impl_vec_ld! 
{ vec_ld_f32 vec_ldl_f32 f32 } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorLde { type Result; unsafe fn vec_lde(self, a: isize) -> Self::Result; @@ -513,6 +528,7 @@ mod sealed { transmute($instr(addr)) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorLde for *const $ty { type Result = t_t_l!($ty); #[inline] @@ -535,6 +551,7 @@ mod sealed { impl_vec_lde! { vec_lde_f32 lvewx f32 } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorXl { type Result; unsafe fn vec_xl(self, a: isize) -> Self::Result; @@ -570,6 +587,7 @@ mod sealed { r.assume_init() } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorXl for *const $ty { type Result = t_t_l!($ty); #[inline] @@ -601,6 +619,7 @@ mod sealed { test_impl! { vec_vcmpgtsh(a: vector_signed_short, b: vector_signed_short) -> vector_bool_short [ vcmpgtsh, vcmpgtsh ] } test_impl! { vec_vcmpgtsw(a: vector_signed_int, b: vector_signed_int) -> vector_bool_int [ vcmpgtsw, vcmpgtsw ] } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorCmpGt { type Result; unsafe fn vec_cmpgt(self, b: Other) -> Self::Result; @@ -614,6 +633,7 @@ mod sealed { test_impl! { vec_vcmpequh(a: vector_unsigned_short, b: vector_unsigned_short) -> vector_bool_short [ vcmpequh, vcmpequh ] } test_impl! 
{ vec_vcmpequw(a: vector_unsigned_int, b: vector_unsigned_int) -> vector_bool_int [ vcmpequw, vcmpequw ] } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorCmpEq { type Result; unsafe fn vec_cmpeq(self, b: Other) -> Self::Result; @@ -665,6 +685,7 @@ mod sealed { vcmpequw_p(1, a, b) != 0 } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorAllEq { type Result; unsafe fn vec_all_eq(self, b: Other) -> Self::Result; @@ -680,6 +701,7 @@ mod sealed { vcmpeqfp_p(2, a, b) != 0 } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAllEq for vector_float { type Result = bool; #[inline] @@ -688,6 +710,7 @@ mod sealed { } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorAnyEq { type Result; unsafe fn vec_any_eq(self, b: Other) -> Self::Result; @@ -702,6 +725,7 @@ mod sealed { vcmpeqfp_p(1, a, b) != 0 } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAnyEq for vector_float { type Result = bool; #[inline] @@ -796,6 +820,7 @@ mod sealed { vcmpgtuw_p(3, b, a) != 0 } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorAllGe { type Result; unsafe fn vec_all_ge(self, b: Other) -> Self::Result; @@ -815,6 +840,7 @@ mod sealed { vcmpgefp_p(2, a, b) != 0 } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAllGe for vector_float { type Result = bool; #[inline] @@ -823,6 +849,7 @@ mod sealed { } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorAnyGe { type Result; unsafe fn vec_any_ge(self, b: Other) -> Self::Result; @@ -841,6 +868,7 @@ mod sealed { vcmpgefp_p(1, a, b) != 0 } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAnyGe for vector_float { type Result = bool; #[inline] @@ -935,6 +963,7 @@ mod sealed { vcmpgtuw_p(1, a, b) != 0 } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorAllGt { type Result; unsafe fn vec_all_gt(self, b: 
Other) -> Self::Result; @@ -954,6 +983,7 @@ mod sealed { vcmpgtfp_p(2, a, b) != 0 } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAllGt for vector_float { type Result = bool; #[inline] @@ -962,6 +992,7 @@ mod sealed { } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorAnyGt { type Result; unsafe fn vec_any_gt(self, b: Other) -> Self::Result; @@ -980,6 +1011,7 @@ mod sealed { vcmpgtfp_p(1, a, b) != 0 } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAnyGt for vector_float { type Result = bool; #[inline] @@ -1032,6 +1064,7 @@ mod sealed { vcmpequw_p(3, a, b) != 0 } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorAllNe { type Result; unsafe fn vec_all_ne(self, b: Other) -> Self::Result; @@ -1047,6 +1080,7 @@ mod sealed { vcmpeqfp_p(0, a, b) != 0 } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAllNe for vector_float { type Result = bool; #[inline] @@ -1055,6 +1089,7 @@ mod sealed { } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorAnyNe { type Result; unsafe fn vec_any_ne(self, b: Other) -> Self::Result; @@ -1069,6 +1104,7 @@ mod sealed { vcmpeqfp_p(3, a, b) != 0 } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAnyNe for vector_float { type Result = bool; #[inline] @@ -1086,6 +1122,7 @@ mod sealed { test_impl! { vec_vavguh(a: vector_unsigned_short, b: vector_unsigned_short) -> vector_unsigned_short [ vavguh, vavguh ] } test_impl! 
{ vec_vavguw(a: vector_unsigned_int, b: vector_unsigned_int) -> vector_unsigned_int [ vavguw, vavguw ] } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorAvg { type Result; unsafe fn vec_avg(self, b: Other) -> Self::Result; @@ -1103,6 +1140,7 @@ mod sealed { transmute(simd_and(simd_xor(u8x16::splat(0xff), b), a)) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorAndc { type Result; unsafe fn vec_andc(self, b: Other) -> Self::Result; @@ -1110,6 +1148,7 @@ mod sealed { macro_rules! impl_vec_andc { (($a:ty, $b:ty) -> $r:ty) => { + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAndc<$b> for $a { type Result = $r; #[inline] @@ -1135,6 +1174,7 @@ mod sealed { test_impl! { vec_vand(a: vector_signed_char, b: vector_signed_char) -> vector_signed_char [ simd_and, vand / xxland ] } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorAnd { type Result; unsafe fn vec_and(self, b: Other) -> Self::Result; @@ -1149,6 +1189,7 @@ mod sealed { test_impl! { vec_vadduhs(a: vector_unsigned_short, b: vector_unsigned_short) -> vector_unsigned_short [ vadduhs, vadduhs ] } test_impl! { vec_vadduws(a: vector_unsigned_int, b: vector_unsigned_int) -> vector_unsigned_int [ vadduws, vadduws ] } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorAdds { type Result; unsafe fn vec_adds(self, b: Other) -> Self::Result; @@ -1165,6 +1206,7 @@ mod sealed { test_impl! { vec_vsubuhs(a: vector_unsigned_short, b: vector_unsigned_short) -> vector_unsigned_short [ vsubuhs, vsubuhs ] } test_impl! { vec_vsubuws(a: vector_unsigned_int, b: vector_unsigned_int) -> vector_unsigned_int [ vsubuws, vsubuws ] } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorSubs { type Result; unsafe fn vec_subs(self, b: Other) -> Self::Result; @@ -1172,6 +1214,7 @@ mod sealed { impl_vec_trait! 
{ [VectorSubs vec_subs] ~(vsububs, vsubsbs, vsubuhs, vsubshs, vsubuws, vsubsws) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorAbs { unsafe fn vec_abs(self) -> Self; } @@ -1202,6 +1245,7 @@ mod sealed { impl_vec_trait! { [VectorAbs vec_abs] vec_abs_f32 (vector_float) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorAbss { unsafe fn vec_abss(self) -> Self; } @@ -1261,12 +1305,14 @@ mod sealed { vec_perm(a, a, transmute(b)) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorSplat { unsafe fn vec_splat(self) -> Self; } macro_rules! impl_vec_splat { ($ty:ty, $fun:ident) => { + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorSplat for $ty { #[inline] #[target_feature(enable = "altivec")] @@ -1293,6 +1339,7 @@ mod sealed { #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr($instr, IMM5 = 1))] + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn $name() -> s_t_l!($r) { static_assert_simm_bits!(IMM5, 5); transmute($r::splat(IMM5 as $v)) @@ -1326,6 +1373,7 @@ mod sealed { test_impl! { vec_splats_i32 (v: i32) -> vector_signed_int [splats_i32, vspltw / xxspltw] } test_impl! { vec_splats_f32 (v: f32) -> vector_float [splats_f32, vspltw / xxspltw] } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorSplats { type Result; unsafe fn vec_splats(self) -> Self::Result; @@ -1353,6 +1401,7 @@ mod sealed { test_impl! { vec_vsubuhm (a: vector_unsigned_short, b: vector_unsigned_short) -> vector_unsigned_short [simd_sub, vsubuhm] } test_impl! { vec_vsubuwm (a: vector_unsigned_int, b: vector_unsigned_int) -> vector_unsigned_int [simd_sub, vsubuwm] } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorSub { type Result; unsafe fn vec_sub(self, b: Other) -> Self::Result; @@ -1369,6 +1418,7 @@ mod sealed { test_impl! 
{ vec_vminuh (a: vector_unsigned_short, b: vector_unsigned_short) -> vector_unsigned_short [vminuh, vminuh] } test_impl! { vec_vminuw (a: vector_unsigned_int, b: vector_unsigned_int) -> vector_unsigned_int [vminuw, vminuw] } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorMin { type Result; unsafe fn vec_min(self, b: Other) -> Self::Result; @@ -1384,6 +1434,7 @@ mod sealed { test_impl! { vec_vmaxuh (a: vector_unsigned_short, b: vector_unsigned_short) -> vector_unsigned_short [vmaxuh, vmaxuh] } test_impl! { vec_vmaxuw (a: vector_unsigned_int, b: vector_unsigned_int) -> vector_unsigned_int [vmaxuw, vmaxuw] } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorMax { type Result; unsafe fn vec_max(self, b: Other) -> Self::Result; @@ -1422,10 +1473,12 @@ mod sealed { vmulesh(a, b) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorMule { unsafe fn vec_mule(self, b: Self) -> Result; } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorMule for vector_unsigned_char { #[inline] #[target_feature(enable = "altivec")] @@ -1433,6 +1486,7 @@ mod sealed { vmuleub(self, b) } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorMule for vector_signed_char { #[inline] #[target_feature(enable = "altivec")] @@ -1440,6 +1494,7 @@ mod sealed { vmulesb(self, b) } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorMule for vector_unsigned_short { #[inline] #[target_feature(enable = "altivec")] @@ -1447,6 +1502,7 @@ mod sealed { vmuleuh(self, b) } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorMule for vector_signed_short { #[inline] #[target_feature(enable = "altivec")] @@ -1486,10 +1542,12 @@ mod sealed { vmulosh(a, b) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorMulo { unsafe fn vec_mulo(self, b: Self) -> Result; } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl 
VectorMulo for vector_unsigned_char { #[inline] #[target_feature(enable = "altivec")] @@ -1497,6 +1555,7 @@ mod sealed { vmuloub(self, b) } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorMulo for vector_signed_char { #[inline] #[target_feature(enable = "altivec")] @@ -1504,6 +1563,7 @@ mod sealed { vmulosb(self, b) } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorMulo for vector_unsigned_short { #[inline] #[target_feature(enable = "altivec")] @@ -1511,6 +1571,7 @@ mod sealed { vmulouh(self, b) } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorMulo for vector_signed_short { #[inline] #[target_feature(enable = "altivec")] @@ -1540,10 +1601,12 @@ mod sealed { vsum4shs(a, b) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorSum4s { unsafe fn vec_sum4s(self, b: Other) -> Other; } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorSum4s for vector_unsigned_char { #[inline] #[target_feature(enable = "altivec")] @@ -1552,6 +1615,7 @@ mod sealed { } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorSum4s for vector_signed_char { #[inline] #[target_feature(enable = "altivec")] @@ -1560,6 +1624,7 @@ mod sealed { } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorSum4s for vector_signed_short { #[inline] #[target_feature(enable = "altivec")] @@ -1633,10 +1698,12 @@ mod sealed { vmsumshm(a, b, c) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorMsum { unsafe fn vec_msum(self, b: B, c: Other) -> Other; } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorMsum for vector_unsigned_char { #[inline] #[target_feature(enable = "altivec")] @@ -1649,6 +1716,7 @@ mod sealed { } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorMsum for vector_signed_char { #[inline] #[target_feature(enable = "altivec")] @@ -1661,6 +1729,7 @@ mod sealed { 
} } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorMsum for vector_unsigned_short { #[inline] #[target_feature(enable = "altivec")] @@ -1673,6 +1742,7 @@ mod sealed { } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorMsum for vector_signed_short { #[inline] #[target_feature(enable = "altivec")] @@ -1707,10 +1777,12 @@ mod sealed { vmsumshs(a, b, c) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorMsums { unsafe fn vec_msums(self, b: Self, c: Other) -> Other; } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorMsums for vector_unsigned_short { #[inline] #[target_feature(enable = "altivec")] @@ -1719,6 +1791,7 @@ mod sealed { } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorMsums for vector_signed_short { #[inline] #[target_feature(enable = "altivec")] @@ -1738,12 +1811,14 @@ mod sealed { vperm(a, b, c) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorPerm { unsafe fn vec_vperm(self, b: Self, c: vector_unsigned_char) -> Self; } macro_rules! vector_perm { {$impl: ident} => { + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorPerm for $impl { #[inline] #[target_feature(enable = "altivec")] @@ -1768,6 +1843,7 @@ mod sealed { vector_perm! 
{ vector_float } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorAdd { type Result; unsafe fn vec_add(self, other: Other) -> Self::Result; @@ -1779,6 +1855,7 @@ mod sealed { pub unsafe fn vec_add_bc_sc(a: vector_bool_char, b: vector_signed_char) -> vector_signed_char { simd_add(transmute(a), b) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_bool_char { type Result = vector_signed_char; #[inline] @@ -1787,6 +1864,7 @@ mod sealed { vec_add_bc_sc(self, other) } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_signed_char { type Result = vector_signed_char; #[inline] @@ -1805,6 +1883,7 @@ mod sealed { ) -> vector_signed_char { simd_add(a, b) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_signed_char { type Result = vector_signed_char; #[inline] @@ -1823,6 +1902,7 @@ mod sealed { ) -> vector_unsigned_char { simd_add(transmute(a), b) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_bool_char { type Result = vector_unsigned_char; #[inline] @@ -1831,6 +1911,7 @@ mod sealed { vec_add_bc_uc(self, other) } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_unsigned_char { type Result = vector_unsigned_char; #[inline] @@ -1849,6 +1930,7 @@ mod sealed { ) -> vector_unsigned_char { simd_add(a, b) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_unsigned_char { type Result = vector_unsigned_char; #[inline] @@ -1870,6 +1952,7 @@ mod sealed { simd_add(a, b) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_bool_short { type Result = vector_signed_short; #[inline] @@ -1878,6 +1961,7 @@ mod sealed { vec_add_bs_ss(self, other) } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_signed_short { type Result = vector_signed_short; #[inline] @@ 
-1896,6 +1980,7 @@ mod sealed { ) -> vector_signed_short { simd_add(a, b) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_signed_short { type Result = vector_signed_short; #[inline] @@ -1916,6 +2001,7 @@ mod sealed { let a: vector_unsigned_short = simd_cast(a); simd_add(a, b) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_bool_short { type Result = vector_unsigned_short; #[inline] @@ -1924,6 +2010,7 @@ mod sealed { vec_add_bs_us(self, other) } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_unsigned_short { type Result = vector_unsigned_short; #[inline] @@ -1943,6 +2030,7 @@ mod sealed { simd_add(a, b) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_unsigned_short { type Result = vector_unsigned_short; #[inline] @@ -1960,6 +2048,7 @@ mod sealed { let a: vector_signed_int = simd_cast(a); simd_add(a, b) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_bool_int { type Result = vector_signed_int; #[inline] @@ -1968,6 +2057,7 @@ mod sealed { vec_add_bi_si(self, other) } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_signed_int { type Result = vector_signed_int; #[inline] @@ -1983,6 +2073,7 @@ mod sealed { pub unsafe fn vec_add_si_si(a: vector_signed_int, b: vector_signed_int) -> vector_signed_int { simd_add(a, b) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_signed_int { type Result = vector_signed_int; #[inline] @@ -2000,6 +2091,7 @@ mod sealed { let a: vector_unsigned_int = simd_cast(a); simd_add(a, b) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_bool_int { type Result = vector_unsigned_int; #[inline] @@ -2008,6 +2100,7 @@ mod sealed { vec_add_bi_ui(self, other) } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd 
for vector_unsigned_int { type Result = vector_unsigned_int; #[inline] @@ -2026,6 +2119,7 @@ mod sealed { ) -> vector_unsigned_int { simd_add(a, b) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_unsigned_int { type Result = vector_unsigned_int; #[inline] @@ -2042,6 +2136,7 @@ mod sealed { simd_add(a, b) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorAdd for vector_float { type Result = vector_float; #[inline] @@ -2051,6 +2146,7 @@ mod sealed { } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorMladd { type Result; unsafe fn vec_mladd(self, b: Other, c: Other) -> Self::Result; @@ -2072,6 +2168,7 @@ mod sealed { macro_rules! vector_mladd { ($a: ident, $bc: ident, $d: ident) => { + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorMladd<$bc> for $a { type Result = $d; #[inline] @@ -2092,6 +2189,7 @@ mod sealed { vector_mladd! { vector_signed_short, vector_unsigned_short, vector_signed_short } vector_mladd! { vector_signed_short, vector_signed_short, vector_signed_short } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorOr { type Result; unsafe fn vec_or(self, b: Other) -> Self::Result; @@ -2099,6 +2197,7 @@ mod sealed { impl_vec_trait! { [VectorOr vec_or] ~(simd_or) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorXor { type Result; unsafe fn vec_xor(self, b: Other) -> Self::Result; @@ -2126,6 +2225,7 @@ mod sealed { vector_vnor! { vec_vnoruh u16 } vector_vnor! 
{ vec_vnoruw u32 } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorNor { type Result; unsafe fn vec_nor(self, b: Other) -> Self::Result; @@ -2149,16 +2249,19 @@ mod sealed { vcfux(a, IMM5) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorCtf { unsafe fn vec_ctf(self) -> vector_float; } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorCtf for vector_signed_int { unsafe fn vec_ctf(self) -> vector_float { vec_ctf_i32::(self) } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorCtf for vector_unsigned_int { unsafe fn vec_ctf(self) -> vector_float { vec_ctf_u32::(self) @@ -2265,6 +2368,7 @@ mod sealed { vec_perm(a, b, mergel_perm) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorMergeh { type Result; unsafe fn vec_mergeh(self, b: Other) -> Self::Result; @@ -2273,6 +2377,7 @@ mod sealed { impl_vec_trait! { [VectorMergeh vec_mergeh]+ 2b (vec_vmrghb, vec_vmrghh, vec_vmrghw) } impl_vec_trait! { [VectorMergeh vec_mergeh]+ vec_vmrghw (vector_float, vector_float) -> vector_float } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorMergel { type Result; unsafe fn vec_mergel(self, b: Other) -> Self::Result; @@ -2319,6 +2424,7 @@ mod sealed { transmute(vec_perm(a, b, pack_perm)) } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorPack { type Result; unsafe fn vec_pack(self, b: Other) -> Self::Result; @@ -2400,6 +2506,7 @@ mod sealed { } } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorPacks { type Result; unsafe fn vec_packs(self, b: Other) -> Self::Result; @@ -2410,6 +2517,7 @@ mod sealed { impl_vec_trait! { [VectorPacks vec_packs] vec_vpkswss (vector_signed_int, vector_signed_int) -> vector_signed_short } impl_vec_trait! 
{ [VectorPacks vec_packs] vec_vpkuwus (vector_unsigned_int, vector_unsigned_int) -> vector_unsigned_short } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorPacksu { type Result; unsafe fn vec_packsu(self, b: Other) -> Self::Result; @@ -2441,6 +2549,7 @@ mod sealed { impl_vec_unpack! { vec_vupkhsh (vector_signed_short) -> vector_signed_int [vupklsh, vupkhsh] } impl_vec_unpack! { vec_vupklsh (vector_signed_short) -> vector_signed_int [vupkhsh, vupklsh] } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorUnpackh { type Result; unsafe fn vec_unpackh(self) -> Self::Result; @@ -2451,6 +2560,7 @@ mod sealed { impl_vec_trait! { [VectorUnpackh vec_unpackh] vec_vupkhsh (vector_signed_short) -> vector_signed_int } impl_vec_trait! { [VectorUnpackh vec_unpackh]+ vec_vupkhsh (vector_bool_short) -> vector_bool_int } + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorUnpackl { type Result; unsafe fn vec_unpackl(self) -> Self::Result; @@ -2465,6 +2575,7 @@ mod sealed { /// Vector Merge Low #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_mergel(a: T, b: U) -> >::Result where T: sealed::VectorMergel, @@ -2475,6 +2586,7 @@ where /// Vector Merge High #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_mergeh(a: T, b: U) -> >::Result where T: sealed::VectorMergeh, @@ -2485,6 +2597,7 @@ where /// Vector Pack #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_pack(a: T, b: U) -> >::Result where T: sealed::VectorPack, @@ -2495,6 +2608,7 @@ where /// Vector Pack Saturated #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_packs(a: T, b: U) -> >::Result where T: sealed::VectorPacks, @@ -2505,6 +2619,7 @@ where /// 
Vector Pack Saturated Unsigned #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_packsu(a: T, b: U) -> >::Result where T: sealed::VectorPacksu, @@ -2515,6 +2630,7 @@ where /// Vector Unpack High #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_unpackh(a: T) -> ::Result where T: sealed::VectorUnpackh, @@ -2525,6 +2641,7 @@ where /// Vector Unpack Low #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_unpackl(a: T) -> ::Result where T: sealed::VectorUnpackl, @@ -2535,6 +2652,7 @@ where /// Vector Load Indexed. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_ld(off: isize, p: T) -> ::Result where T: sealed::VectorLd, @@ -2545,6 +2663,7 @@ where /// Vector Load Indexed Least Recently Used. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_ldl(off: isize, p: T) -> ::Result where T: sealed::VectorLd, @@ -2555,6 +2674,7 @@ where /// Vector Load Element Indexed. 
#[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_lde(off: isize, p: T) -> ::Result where T: sealed::VectorLde, @@ -2565,6 +2685,7 @@ where /// VSX Unaligned Load #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_xl(off: isize, p: T) -> ::Result where T: sealed::VectorXl, @@ -2576,6 +2697,7 @@ where #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr(vlogefp))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_loge(a: vector_float) -> vector_float { vlogefp(a) } @@ -2583,6 +2705,7 @@ pub unsafe fn vec_loge(a: vector_float) -> vector_float { /// Vector floor. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_floor(a: vector_float) -> vector_float { sealed::vec_floor(a) } @@ -2590,6 +2713,7 @@ pub unsafe fn vec_floor(a: vector_float) -> vector_float { /// Vector expte. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_expte(a: vector_float) -> vector_float { sealed::vec_vexptefp(a) } @@ -2597,6 +2721,7 @@ pub unsafe fn vec_expte(a: vector_float) -> vector_float { /// Vector cmplt. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_cmplt(a: U, b: T) -> >::Result where T: sealed::VectorCmpGt, @@ -2607,6 +2732,7 @@ where /// Vector cmple. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_cmple(a: vector_float, b: vector_float) -> vector_bool_int { vec_cmpge(b, a) } @@ -2614,6 +2740,7 @@ pub unsafe fn vec_cmple(a: vector_float, b: vector_float) -> vector_bool_int { /// Vector cmpgt. 
#[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_cmpgt(a: T, b: U) -> >::Result where T: sealed::VectorCmpGt, @@ -2624,6 +2751,7 @@ where /// Vector cmpge. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_cmpge(a: vector_float, b: vector_float) -> vector_bool_int { sealed::vec_vcmpgefp(a, b) } @@ -2631,6 +2759,7 @@ pub unsafe fn vec_cmpge(a: vector_float, b: vector_float) -> vector_bool_int { /// Vector cmpeq. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_cmpeq(a: T, b: U) -> >::Result where T: sealed::VectorCmpEq, @@ -2641,6 +2770,7 @@ where /// Vector cmpb. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_cmpb(a: vector_float, b: vector_float) -> vector_signed_int { sealed::vec_vcmpbfp(a, b) } @@ -2648,6 +2778,7 @@ pub unsafe fn vec_cmpb(a: vector_float, b: vector_float) -> vector_signed_int { /// Vector ceil. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_ceil(a: vector_float) -> vector_float { sealed::vec_vceil(a) } @@ -2655,6 +2786,7 @@ pub unsafe fn vec_ceil(a: vector_float) -> vector_float { /// Vector avg. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_avg(a: T, b: U) -> >::Result where T: sealed::VectorAvg, @@ -2665,6 +2797,7 @@ where /// Vector andc. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_andc(a: T, b: U) -> >::Result where T: sealed::VectorAndc, @@ -2675,6 +2808,7 @@ where /// Vector and. 
#[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_and(a: T, b: U) -> >::Result where T: sealed::VectorAnd, @@ -2685,6 +2819,7 @@ where /// Vector or. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_or(a: T, b: U) -> >::Result where T: sealed::VectorOr, @@ -2695,6 +2830,7 @@ where /// Vector nor. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_nor(a: T, b: U) -> >::Result where T: sealed::VectorNor, @@ -2705,6 +2841,7 @@ where /// Vector xor. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_xor(a: T, b: U) -> >::Result where T: sealed::VectorXor, @@ -2715,6 +2852,7 @@ where /// Vector adds. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_adds(a: T, b: U) -> >::Result where T: sealed::VectorAdds, @@ -2725,6 +2863,7 @@ where /// Vector addc. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_addc(a: vector_unsigned_int, b: vector_unsigned_int) -> vector_unsigned_int { sealed::vec_vaddcuw(a, b) } @@ -2732,6 +2871,7 @@ pub unsafe fn vec_addc(a: vector_unsigned_int, b: vector_unsigned_int) -> vector /// Vector abs. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_abs(a: T) -> T where T: sealed::VectorAbs, @@ -2742,6 +2882,7 @@ where /// Vector abss. 
#[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_abss(a: T) -> T where T: sealed::VectorAbss, @@ -2752,6 +2893,7 @@ where /// Vector Splat #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_splat(a: T) -> T where T: sealed::VectorSplat, @@ -2769,6 +2911,7 @@ splat! { vec_splat_i32, i32, i32x4 [vspltisw, "Vector Splat to Signed Word"] } /// Vector splats. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_splats(a: T) -> ::Result where T: sealed::VectorSplats, @@ -2779,6 +2922,7 @@ where /// Vector sub. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_sub(a: T, b: U) -> >::Result where T: sealed::VectorSub, @@ -2789,6 +2933,7 @@ where /// Vector subs. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_subs(a: T, b: U) -> >::Result where T: sealed::VectorSubs, @@ -2799,6 +2944,7 @@ where /// Vector min. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_min(a: T, b: U) -> >::Result where T: sealed::VectorMin, @@ -2809,6 +2955,7 @@ where /// Vector max. #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_max(a: T, b: U) -> >::Result where T: sealed::VectorMax, @@ -2820,6 +2967,7 @@ where #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr(mfvscr))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_mfvscr() -> vector_unsigned_short { mfvscr() } @@ -2827,6 +2975,7 @@ pub unsafe fn vec_mfvscr() -> vector_unsigned_short { /// Vector add. 
#[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_add(a: T, b: U) -> >::Result where T: sealed::VectorAdd, @@ -2837,6 +2986,7 @@ where /// Vector Convert to Floating-Point #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_ctf(a: T) -> vector_float where T: sealed::VectorCtf, @@ -2848,6 +2998,7 @@ where #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr(vctsxs, IMM5 = 1))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_cts(a: vector_float) -> vector_signed_int { static_assert_uimm_bits!(IMM5, 5); @@ -2858,6 +3009,7 @@ pub unsafe fn vec_cts(a: vector_float) -> vector_signed_int { #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr(vctuxs, IMM5 = 1))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_ctu(a: vector_float) -> vector_unsigned_int { static_assert_uimm_bits!(IMM5, 5); @@ -2871,6 +3023,7 @@ mod endian { /// Vector permute. 
#[inline] #[target_feature(enable = "altivec")] + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_perm(a: T, b: T, c: vector_unsigned_char) -> T where T: sealed::VectorPerm, @@ -2888,6 +3041,7 @@ mod endian { /// Vector Sum Across Partial (1/2) Saturated #[inline] + #[unstable(feature = "stdarch_powerpc", issue = "111145")] #[target_feature(enable = "altivec")] pub unsafe fn vec_sum2s(a: vector_signed_int, b: vector_signed_int) -> vector_signed_int { // vsum2sws has big-endian bias @@ -2906,6 +3060,7 @@ mod endian { /// Vector Multiply Even #[inline] #[target_feature(enable = "altivec")] + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_mule(a: T, b: T) -> U where T: sealed::VectorMulo, @@ -2915,6 +3070,7 @@ mod endian { /// Vector Multiply Odd #[inline] #[target_feature(enable = "altivec")] + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_mulo(a: T, b: T) -> U where T: sealed::VectorMule, @@ -2927,6 +3083,7 @@ mod endian { #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr(vmhaddshs))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_madds( a: vector_signed_short, b: vector_signed_short, @@ -2938,6 +3095,7 @@ pub unsafe fn vec_madds( /// Vector Multiply Low and Add Unsigned Half Word #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_mladd(a: T, b: U, c: U) -> >::Result where T: sealed::VectorMladd, @@ -2949,6 +3107,7 @@ where #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr(vmhraddshs))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_mradds( a: vector_signed_short, b: vector_signed_short, @@ -2960,6 +3119,7 @@ pub unsafe fn vec_mradds( /// Vector Multiply Sum #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn 
vec_msum(a: T, b: B, c: U) -> U where T: sealed::VectorMsum, @@ -2970,6 +3130,7 @@ where /// Vector Multiply Sum Saturated #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_msums(a: T, b: T, c: U) -> U where T: sealed::VectorMsums, @@ -2980,6 +3141,7 @@ where /// Vector Multiply Add #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_madd(a: vector_float, b: vector_float, c: vector_float) -> vector_float { vmaddfp(a, b, c) } @@ -2987,6 +3149,7 @@ pub unsafe fn vec_madd(a: vector_float, b: vector_float, c: vector_float) -> vec /// Vector Negative Multiply Subtract #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_nmsub(a: vector_float, b: vector_float, c: vector_float) -> vector_float { vnmsubfp(a, b, c) } @@ -2994,6 +3157,7 @@ pub unsafe fn vec_nmsub(a: vector_float, b: vector_float, c: vector_float) -> ve /// Vector Sum Across Partial (1/4) Saturated #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_sum4s(a: T, b: U) -> U where T: sealed::VectorSum4s, @@ -3004,6 +3168,7 @@ where /// Vector All Elements Equal #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_all_eq(a: T, b: U) -> >::Result where T: sealed::VectorAllEq, @@ -3014,6 +3179,7 @@ where /// Vector All Elements Equal #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_any_eq(a: T, b: U) -> >::Result where T: sealed::VectorAnyEq, @@ -3024,6 +3190,7 @@ where /// Vector All Elements Greater or Equal #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_all_ge(a: T, b: U) -> >::Result where T: 
sealed::VectorAllGe, @@ -3034,6 +3201,7 @@ where /// Vector Any Element Greater or Equal #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_any_ge(a: T, b: U) -> >::Result where T: sealed::VectorAnyGe, @@ -3044,6 +3212,7 @@ where /// Vector All Elements Greater Than #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_all_gt(a: T, b: U) -> >::Result where T: sealed::VectorAllGt, @@ -3054,6 +3223,7 @@ where /// Vector Any Element Greater Than #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_any_gt(a: T, b: U) -> >::Result where T: sealed::VectorAnyGt, @@ -3065,6 +3235,7 @@ where #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr("vcmpbfp."))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_all_in(a: vector_float, b: vector_float) -> bool { vcmpbfp_p(0, a, b) != 0 } @@ -3072,6 +3243,7 @@ pub unsafe fn vec_all_in(a: vector_float, b: vector_float) -> bool { /// Vector All Elements Less Than or Equal #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_all_le(a: U, b: T) -> >::Result where T: sealed::VectorAllGe, @@ -3082,6 +3254,7 @@ where /// Vector Any Element Less Than or Equal #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_any_le(a: U, b: T) -> >::Result where T: sealed::VectorAnyGe, @@ -3092,6 +3265,7 @@ where /// Vector All Elements Less Than #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_all_lt(a: U, b: T) -> >::Result where T: sealed::VectorAllGt, @@ -3102,6 +3276,7 @@ where /// Vector Any Element Less Than #[inline] #[target_feature(enable = "altivec")] 
+#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_any_lt(a: U, b: T) -> >::Result where T: sealed::VectorAnyGt, @@ -3113,6 +3288,7 @@ where #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr("vcmpeqfp."))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_all_nan(a: vector_float) -> bool { vcmpeqfp_p(0, a, a) != 0 } @@ -3121,6 +3297,7 @@ pub unsafe fn vec_all_nan(a: vector_float) -> bool { #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr("vcmpeqfp."))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_any_nan(a: vector_float) -> bool { vcmpeqfp_p(3, a, a) != 0 } @@ -3128,6 +3305,7 @@ pub unsafe fn vec_any_nan(a: vector_float) -> bool { /// Vector All Elements Not Equal #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_all_ne(a: T, b: U) -> >::Result where T: sealed::VectorAllNe, @@ -3138,6 +3316,7 @@ where /// Vector Any Elements Not Equal #[inline] #[target_feature(enable = "altivec")] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_any_ne(a: T, b: U) -> >::Result where T: sealed::VectorAnyNe, @@ -3149,6 +3328,7 @@ where #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr("vcmpgefp."))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_all_nge(a: vector_float, b: vector_float) -> bool { vcmpgefp_p(0, a, b) != 0 } @@ -3157,6 +3337,7 @@ pub unsafe fn vec_all_nge(a: vector_float, b: vector_float) -> bool { #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr("vcmpgtfp."))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_all_ngt(a: vector_float, b: vector_float) -> bool { vcmpgtfp_p(0, a, b) != 0 } @@ -3165,6 +3346,7 @@ pub unsafe fn vec_all_ngt(a: vector_float, b: vector_float) -> bool { #[inline] 
#[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr("vcmpgefp."))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_all_nle(a: vector_float, b: vector_float) -> bool { vcmpgefp_p(0, b, a) != 0 } @@ -3173,6 +3355,7 @@ pub unsafe fn vec_all_nle(a: vector_float, b: vector_float) -> bool { #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr("vcmpgtfp."))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_all_nlt(a: vector_float, b: vector_float) -> bool { vcmpgtfp_p(0, b, a) != 0 } @@ -3181,6 +3364,7 @@ pub unsafe fn vec_all_nlt(a: vector_float, b: vector_float) -> bool { #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr("vcmpgefp."))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_all_numeric(a: vector_float) -> bool { vcmpgefp_p(2, a, a) != 0 } @@ -3189,6 +3373,7 @@ pub unsafe fn vec_all_numeric(a: vector_float) -> bool { #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr("vcmpgefp."))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_any_nge(a: vector_float, b: vector_float) -> bool { vcmpgefp_p(3, a, b) != 0 } @@ -3197,6 +3382,7 @@ pub unsafe fn vec_any_nge(a: vector_float, b: vector_float) -> bool { #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr("vcmpgtfp."))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_any_ngt(a: vector_float, b: vector_float) -> bool { vcmpgtfp_p(3, a, b) != 0 } @@ -3205,6 +3391,7 @@ pub unsafe fn vec_any_ngt(a: vector_float, b: vector_float) -> bool { #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr("vcmpgefp."))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_any_nle(a: vector_float, b: vector_float) -> bool { vcmpgefp_p(3, b, a) != 0 } @@ -3213,6 +3400,7 @@ pub unsafe fn vec_any_nle(a: vector_float, b: 
vector_float) -> bool { #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr("vcmpgtfp."))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_any_nlt(a: vector_float, b: vector_float) -> bool { vcmpgtfp_p(3, b, a) != 0 } @@ -3221,6 +3409,7 @@ pub unsafe fn vec_any_nlt(a: vector_float, b: vector_float) -> bool { #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr("vcmpgefp."))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_any_numeric(a: vector_float) -> bool { vcmpgefp_p(1, a, a) != 0 } @@ -3229,6 +3418,7 @@ pub unsafe fn vec_any_numeric(a: vector_float) -> bool { #[inline] #[target_feature(enable = "altivec")] #[cfg_attr(test, assert_instr("vcmpeqfp."))] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_any_out(a: vector_float) -> bool { vcmpeqfp_p(1, a, a) != 0 } @@ -3239,6 +3429,7 @@ mod endian { /// Vector permute. #[inline] #[target_feature(enable = "altivec")] + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_perm(a: T, b: T, c: vector_unsigned_char) -> T where T: sealed::VectorPerm, @@ -3249,6 +3440,7 @@ mod endian { /// Vector Sum Across Partial (1/2) Saturated #[inline] #[target_feature(enable = "altivec")] + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_sum2s(a: vector_signed_int, b: vector_signed_int) -> vector_signed_int { vsum2sws(a, b) } @@ -3256,6 +3448,7 @@ mod endian { /// Vector Multiply Even #[inline] #[target_feature(enable = "altivec")] + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_mule(a: T, b: T) -> U where T: sealed::VectorMule, @@ -3265,6 +3458,7 @@ mod endian { /// Vector Multiply Odd #[inline] #[target_feature(enable = "altivec")] + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_mulo(a: T, b: T) -> U where T: sealed::VectorMulo, @@ -3273,6 +3467,7 @@ mod endian { } } 
+#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub use self::endian::*; #[cfg(test)] diff --git a/crates/core_arch/src/powerpc/macros.rs b/crates/core_arch/src/powerpc/macros.rs index ba16689765..a4fcef1280 100644 --- a/crates/core_arch/src/powerpc/macros.rs +++ b/crates/core_arch/src/powerpc/macros.rs @@ -21,6 +21,7 @@ macro_rules! test_impl { #[allow(unknown_lints, unused_macro_rules)] macro_rules! impl_vec_trait { ([$Trait:ident $m:ident] $fun:ident ($a:ty)) => { + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl $Trait for $a { #[inline] #[target_feature(enable = "altivec")] @@ -30,6 +31,7 @@ macro_rules! impl_vec_trait { } }; ([$Trait:ident $m:ident] $fun:ident ($a:ty) -> $r:ty) => { + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl $Trait for $a { type Result = $r; #[inline] @@ -40,6 +42,7 @@ macro_rules! impl_vec_trait { } }; ([$Trait:ident $m:ident]+ $fun:ident ($a:ty) -> $r:ty) => { + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl $Trait for $a { type Result = $r; #[inline] @@ -59,6 +62,7 @@ macro_rules! impl_vec_trait { impl_vec_trait!{ [$Trait $m] $sf (vector_float) -> vector_float } }; ([$Trait:ident $m:ident] $fun:ident ($a:ty, $b:ty) -> $r:ty) => { + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl $Trait<$b> for $a { type Result = $r; #[inline] @@ -69,6 +73,7 @@ macro_rules! 
impl_vec_trait { } }; ([$Trait:ident $m:ident]+ $fun:ident ($a:ty, $b:ty) -> $r:ty) => { + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl $Trait<$b> for $a { type Result = $r; #[inline] diff --git a/crates/core_arch/src/powerpc/mod.rs b/crates/core_arch/src/powerpc/mod.rs index 753f84b0b9..2f6948c749 100644 --- a/crates/core_arch/src/powerpc/mod.rs +++ b/crates/core_arch/src/powerpc/mod.rs @@ -4,9 +4,11 @@ mod macros; mod altivec; +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub use self::altivec::*; mod vsx; +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub use self::vsx::*; #[cfg(test)] @@ -15,6 +17,7 @@ use stdarch_test::assert_instr; /// Generates the trap instruction `TRAP` #[cfg_attr(test, assert_instr(trap))] #[inline] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn trap() -> ! { crate::intrinsics::abort() } diff --git a/crates/core_arch/src/powerpc/vsx.rs b/crates/core_arch/src/powerpc/vsx.rs index f2ebc23b21..8faca3d87b 100644 --- a/crates/core_arch/src/powerpc/vsx.rs +++ b/crates/core_arch/src/powerpc/vsx.rs @@ -18,12 +18,16 @@ use crate::mem::transmute; types! 
{ // pub struct vector_Float16 = f16x8; /// PowerPC-specific 128-bit wide vector of two packed `i64` + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub struct vector_signed_long(i64, i64); /// PowerPC-specific 128-bit wide vector of two packed `u64` + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub struct vector_unsigned_long(u64, u64); /// PowerPC-specific 128-bit wide vector mask of two `i64` + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub struct vector_bool_long(i64, i64); /// PowerPC-specific 128-bit wide vector of two packed `f64` + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub struct vector_double(f64, f64); // pub struct vector_signed_long_long = vector_signed_long; // pub struct vector_unsigned_long_long = vector_unsigned_long; @@ -36,7 +40,9 @@ mod sealed { use super::*; use crate::core_arch::simd::*; + #[unstable(feature = "stdarch_powerpc", issue = "111145")] pub trait VectorPermDI { + #[unstable(feature = "stdarch_powerpc", issue = "111145")] unsafe fn vec_xxpermdi(self, b: Self, dm: u8) -> Self; } @@ -59,6 +65,7 @@ mod sealed { macro_rules! vec_xxpermdi { {$impl: ident} => { + #[unstable(feature = "stdarch_powerpc", issue = "111145")] impl VectorPermDI for $impl { #[inline] #[target_feature(enable = "vsx")] @@ -79,6 +86,7 @@ mod sealed { #[inline] #[target_feature(enable = "vsx")] //#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub unsafe fn vec_xxpermdi(a: T, b: T) -> T where T: sealed::VectorPermDI, diff --git a/crates/core_arch/src/powerpc64/mod.rs b/crates/core_arch/src/powerpc64/mod.rs index 3990a0e8db..dac998df6b 100644 --- a/crates/core_arch/src/powerpc64/mod.rs +++ b/crates/core_arch/src/powerpc64/mod.rs @@ -5,4 +5,5 @@ //! //! 
[64-Bit ELF V2 ABI Specification - Power Architecture]: http://openpowerfoundation.org/wp-content/uploads/resources/leabi/leabi-20170510.pdf +#[unstable(feature = "stdarch_powerpc", issue = "111145")] pub use crate::core_arch::powerpc::*; diff --git a/crates/core_arch/src/riscv32/mod.rs b/crates/core_arch/src/riscv32/mod.rs index 0a8634c85e..7ff871227b 100644 --- a/crates/core_arch/src/riscv32/mod.rs +++ b/crates/core_arch/src/riscv32/mod.rs @@ -2,4 +2,5 @@ mod zk; +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub use zk::*; diff --git a/crates/core_arch/src/riscv32/zk.rs b/crates/core_arch/src/riscv32/zk.rs index 3767577724..15eecc2ea4 100644 --- a/crates/core_arch/src/riscv32/zk.rs +++ b/crates/core_arch/src/riscv32/zk.rs @@ -65,6 +65,7 @@ extern "unadjusted" { // See #1464 // #[cfg_attr(test, assert_instr(aes32esi, BS = 0))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn aes32esi(rs1: u32, rs2: u32) -> u32 { static_assert!(BS < 4); @@ -97,6 +98,7 @@ pub unsafe fn aes32esi(rs1: u32, rs2: u32) -> u32 { // See #1464 // #[cfg_attr(test, assert_instr(aes32esmi, BS = 0))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn aes32esmi(rs1: u32, rs2: u32) -> u32 { static_assert!(BS < 4); @@ -128,6 +130,7 @@ pub unsafe fn aes32esmi(rs1: u32, rs2: u32) -> u32 { // See #1464 // #[cfg_attr(test, assert_instr(aes32dsi, BS = 0))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn aes32dsi(rs1: u32, rs2: u32) -> u32 { static_assert!(BS < 4); @@ -160,6 +163,7 @@ pub unsafe fn aes32dsi(rs1: u32, rs2: u32) -> u32 { // See #1464 // #[cfg_attr(test, assert_instr(aes32dsmi, BS = 0))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn aes32dsmi(rs1: u32, rs2: u32) -> u32 { static_assert!(BS < 4); @@ -187,6 +191,7 @@ pub unsafe fn aes32dsmi(rs1: u32, rs2: u32) -> u32 { // See #1464 // #[cfg_attr(test, 
assert_instr(zip))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn zip(rs: u32) -> u32 { _zip(rs as i32) as u32 } @@ -209,6 +214,7 @@ pub unsafe fn zip(rs: u32) -> u32 { #[target_feature(enable = "zbkb")] #[cfg_attr(test, assert_instr(unzip))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn unzip(rs: u32) -> u32 { _unzip(rs as i32) as u32 } @@ -235,6 +241,7 @@ pub unsafe fn unzip(rs: u32) -> u32 { // See #1464 // #[cfg_attr(test, assert_instr(sha512sig0h))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sha512sig0h(rs1: u32, rs2: u32) -> u32 { _sha512sig0h(rs1 as i32, rs2 as i32) as u32 } @@ -261,6 +268,7 @@ pub unsafe fn sha512sig0h(rs1: u32, rs2: u32) -> u32 { // See #1464 // #[cfg_attr(test, assert_instr(sha512sig0l))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sha512sig0l(rs1: u32, rs2: u32) -> u32 { _sha512sig0l(rs1 as i32, rs2 as i32) as u32 } @@ -287,6 +295,7 @@ pub unsafe fn sha512sig0l(rs1: u32, rs2: u32) -> u32 { // See #1464 // #[cfg_attr(test, assert_instr(sha512sig1h))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sha512sig1h(rs1: u32, rs2: u32) -> u32 { _sha512sig1h(rs1 as i32, rs2 as i32) as u32 } @@ -312,6 +321,7 @@ pub unsafe fn sha512sig1h(rs1: u32, rs2: u32) -> u32 { #[target_feature(enable = "zknh")] #[cfg_attr(test, assert_instr(sha512sig1l))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sha512sig1l(rs1: u32, rs2: u32) -> u32 { _sha512sig1l(rs1 as i32, rs2 as i32) as u32 } @@ -337,6 +347,7 @@ pub unsafe fn sha512sig1l(rs1: u32, rs2: u32) -> u32 { // See #1464 // #[cfg_attr(test, assert_instr(sha512sum0r))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sha512sum0r(rs1: u32, rs2: u32) -> u32 { _sha512sum0r(rs1 as i32, rs2 as i32) as u32 } @@ -362,6 
+373,7 @@ pub unsafe fn sha512sum0r(rs1: u32, rs2: u32) -> u32 { // See #1464 // #[cfg_attr(test, assert_instr(sha512sum1r))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sha512sum1r(rs1: u32, rs2: u32) -> u32 { _sha512sum1r(rs1 as i32, rs2 as i32) as u32 } diff --git a/crates/core_arch/src/riscv64/mod.rs b/crates/core_arch/src/riscv64/mod.rs index ad16d6c231..0e860f6f2a 100644 --- a/crates/core_arch/src/riscv64/mod.rs +++ b/crates/core_arch/src/riscv64/mod.rs @@ -3,6 +3,7 @@ use crate::arch::asm; mod zk; +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub use zk::*; /// Loads virtual machine memory by unsigned word integer @@ -16,6 +17,7 @@ pub use zk::*; /// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.WU` /// instruction which is effectively a dereference to any memory address. #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlv_wu(src: *const u32) -> u32 { let value: u32; asm!(".insn i 0x73, 0x4, {}, {}, 0x681", out(reg) value, in(reg) src, options(readonly, nostack)); @@ -33,6 +35,7 @@ pub unsafe fn hlv_wu(src: *const u32) -> u32 { /// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.D` /// instruction which is effectively a dereference to any memory address. #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlv_d(src: *const i64) -> i64 { let value: i64; asm!(".insn i 0x73, 0x4, {}, {}, 0x6C0", out(reg) value, in(reg) src, options(readonly, nostack)); @@ -48,6 +51,7 @@ pub unsafe fn hlv_d(src: *const i64) -> i64 { /// This function is unsafe for it accesses the virtual supervisor or user via a `HSV.D` /// instruction which is effectively a dereference to any memory address. 
#[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hsv_d(dst: *mut i64, src: i64) { asm!(".insn r 0x73, 0x4, 0x37, x0, {}, {}", in(reg) dst, in(reg) src, options(nostack)); } diff --git a/crates/core_arch/src/riscv64/zk.rs b/crates/core_arch/src/riscv64/zk.rs index 9b403fc957..f89412fce7 100644 --- a/crates/core_arch/src/riscv64/zk.rs +++ b/crates/core_arch/src/riscv64/zk.rs @@ -55,6 +55,7 @@ extern "unadjusted" { #[target_feature(enable = "zkne")] #[cfg_attr(test, assert_instr(aes64es))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn aes64es(rs1: u64, rs2: u64) -> u64 { _aes64es(rs1 as i64, rs2 as i64) as u64 } @@ -78,6 +79,7 @@ pub unsafe fn aes64es(rs1: u64, rs2: u64) -> u64 { #[target_feature(enable = "zkne")] #[cfg_attr(test, assert_instr(aes64esm))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn aes64esm(rs1: u64, rs2: u64) -> u64 { _aes64esm(rs1 as i64, rs2 as i64) as u64 } @@ -101,6 +103,7 @@ pub unsafe fn aes64esm(rs1: u64, rs2: u64) -> u64 { #[target_feature(enable = "zknd")] #[cfg_attr(test, assert_instr(aes64ds))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn aes64ds(rs1: u64, rs2: u64) -> u64 { _aes64ds(rs1 as i64, rs2 as i64) as u64 } @@ -124,6 +127,7 @@ pub unsafe fn aes64ds(rs1: u64, rs2: u64) -> u64 { #[target_feature(enable = "zknd")] #[cfg_attr(test, assert_instr(aes64dsm))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn aes64dsm(rs1: u64, rs2: u64) -> u64 { _aes64dsm(rs1 as i64, rs2 as i64) as u64 } @@ -153,6 +157,7 @@ pub unsafe fn aes64dsm(rs1: u64, rs2: u64) -> u64 { #[rustc_legacy_const_generics(1)] #[cfg_attr(test, assert_instr(aes64ks1i, RNUM = 0))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn aes64ks1i(rs1: u64) -> u64 { static_assert!(RNUM <= 10); @@ -177,6 +182,7 @@ pub unsafe fn 
aes64ks1i(rs1: u64) -> u64 { #[target_feature(enable = "zkne", enable = "zknd")] #[cfg_attr(test, assert_instr(aes64ks2))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn aes64ks2(rs1: u64, rs2: u64) -> u64 { _aes64ks2(rs1 as i64, rs2 as i64) as u64 } @@ -201,6 +207,7 @@ pub unsafe fn aes64ks2(rs1: u64, rs2: u64) -> u64 { #[target_feature(enable = "zkne", enable = "zknd")] #[cfg_attr(test, assert_instr(aes64im))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn aes64im(rs1: u64) -> u64 { _aes64im(rs1 as i64) as u64 } @@ -224,6 +231,7 @@ pub unsafe fn aes64im(rs1: u64) -> u64 { #[target_feature(enable = "zknh")] #[cfg_attr(test, assert_instr(sha512sig0))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sha512sig0(rs1: u64) -> u64 { _sha512sig0(rs1 as i64) as u64 } @@ -247,6 +255,7 @@ pub unsafe fn sha512sig0(rs1: u64) -> u64 { #[target_feature(enable = "zknh")] #[cfg_attr(test, assert_instr(sha512sig1))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sha512sig1(rs1: u64) -> u64 { _sha512sig1(rs1 as i64) as u64 } @@ -270,6 +279,7 @@ pub unsafe fn sha512sig1(rs1: u64) -> u64 { #[target_feature(enable = "zknh")] #[cfg_attr(test, assert_instr(sha512sum0))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sha512sum0(rs1: u64) -> u64 { _sha512sum0(rs1 as i64) as u64 } @@ -293,6 +303,7 @@ pub unsafe fn sha512sum0(rs1: u64) -> u64 { #[target_feature(enable = "zknh")] #[cfg_attr(test, assert_instr(sha512sum1))] #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sha512sum1(rs1: u64) -> u64 { _sha512sum1(rs1 as i64) as u64 } diff --git a/crates/core_arch/src/riscv_shared/mod.rs b/crates/core_arch/src/riscv_shared/mod.rs index 14f6989d20..e75eaee1fe 100644 --- a/crates/core_arch/src/riscv_shared/mod.rs +++ 
b/crates/core_arch/src/riscv_shared/mod.rs @@ -4,9 +4,11 @@ mod p; mod zb; mod zk; -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub use p::*; +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub use zb::*; +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub use zk::*; use crate::arch::asm; @@ -16,7 +18,7 @@ use crate::arch::asm; /// The PAUSE instruction is a HINT that indicates the current hart's rate of instruction retirement /// should be temporarily reduced or paused. The duration of its effect must be bounded and may be zero. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn pause() { unsafe { asm!(".insn i 0x0F, 0, x0, x0, 0x010", options(nomem, nostack)) } } @@ -26,7 +28,7 @@ pub fn pause() { /// The NOP instruction does not change any architecturally visible state, except for /// advancing the `pc` and incrementing any applicable performance counters. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn nop() { unsafe { asm!("nop", options(nomem, nostack)) } } @@ -37,7 +39,7 @@ pub fn nop() { /// until an interrupt might need servicing. This instruction is a hint, /// and a legal implementation is to simply implement WFI as a NOP. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn wfi() { asm!("wfi", options(nomem, nostack)) } @@ -50,7 +52,7 @@ pub unsafe fn wfi() { /// FENCE.I does not ensure that other RISC-V harts' instruction fetches will observe the /// local hart's stores in a multiprocessor system. 
#[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn fence_i() { asm!("fence.i", options(nostack)) } @@ -64,7 +66,7 @@ pub unsafe fn fence_i() { /// virtual address in parameter `vaddr` and that match the address space identified by integer /// parameter `asid`, except for entries containing global mappings. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sfence_vma(vaddr: usize, asid: usize) { asm!("sfence.vma {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) } @@ -76,7 +78,7 @@ pub unsafe fn sfence_vma(vaddr: usize, asid: usize) { /// The fence also invalidates all address-translation cache entries that contain leaf page /// table entries corresponding to the virtual address in parameter `vaddr`, for all address spaces. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sfence_vma_vaddr(vaddr: usize) { asm!("sfence.vma {}, x0", in(reg) vaddr, options(nostack)) } @@ -90,7 +92,7 @@ pub unsafe fn sfence_vma_vaddr(vaddr: usize) { /// address-translation cache entries matching the address space identified by integer /// parameter `asid`, except for entries containing global mappings. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sfence_vma_asid(asid: usize) { asm!("sfence.vma x0, {}", in(reg) asid, options(nostack)) } @@ -101,7 +103,7 @@ pub unsafe fn sfence_vma_asid(asid: usize) { /// tables, for all address spaces. The fence also invalidates all address-translation cache entries, /// for all address spaces. 
#[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sfence_vma_all() { asm!("sfence.vma", options(nostack)) } @@ -111,7 +113,7 @@ pub unsafe fn sfence_vma_all() { /// This instruction invalidates any address-translation cache entries that an /// `SFENCE.VMA` instruction with the same values of `vaddr` and `asid` would invalidate. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sinval_vma(vaddr: usize, asid: usize) { // asm!("sinval.vma {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) asm!(".insn r 0x73, 0, 0x0B, x0, {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) @@ -122,7 +124,7 @@ pub unsafe fn sinval_vma(vaddr: usize, asid: usize) { /// This instruction invalidates any address-translation cache entries that an /// `SFENCE.VMA` instruction with the same values of `vaddr` and `asid` would invalidate. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sinval_vma_vaddr(vaddr: usize) { asm!(".insn r 0x73, 0, 0x0B, x0, {}, x0", in(reg) vaddr, options(nostack)) } @@ -132,7 +134,7 @@ pub unsafe fn sinval_vma_vaddr(vaddr: usize) { /// This instruction invalidates any address-translation cache entries that an /// `SFENCE.VMA` instruction with the same values of `vaddr` and `asid` would invalidate. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sinval_vma_asid(asid: usize) { asm!(".insn r 0x73, 0, 0x0B, x0, x0, {}", in(reg) asid, options(nostack)) } @@ -142,7 +144,7 @@ pub unsafe fn sinval_vma_asid(asid: usize) { /// This instruction invalidates any address-translation cache entries that an /// `SFENCE.VMA` instruction with the same values of `vaddr` and `asid` would invalidate. 
#[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sinval_vma_all() { asm!(".insn r 0x73, 0, 0x0B, x0, x0, x0", options(nostack)) } @@ -152,7 +154,7 @@ pub unsafe fn sinval_vma_all() { /// This instruction guarantees that any previous stores already visible to the current RISC-V hart /// are ordered before subsequent `SINVAL.VMA` instructions executed by the same hart. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sfence_w_inval() { // asm!("sfence.w.inval", options(nostack)) asm!(".insn i 0x73, 0, x0, x0, 0x180", options(nostack)) @@ -163,7 +165,7 @@ pub unsafe fn sfence_w_inval() { /// This instruction guarantees that any previous SINVAL.VMA instructions executed by the current hart /// are ordered before subsequent implicit references by that hart to the memory-management data structures. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn sfence_inval_ir() { // asm!("sfence.inval.ir", options(nostack)) asm!(".insn i 0x73, 0, x0, x0, 0x181", options(nostack)) @@ -178,7 +180,7 @@ pub unsafe fn sfence_inval_ir() { /// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.B` /// instruction which is effectively a dereference to any memory address. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlv_b(src: *const i8) -> i8 { let value: i8; asm!(".insn i 0x73, 0x4, {}, {}, 0x600", out(reg) value, in(reg) src, options(readonly, nostack)); @@ -194,7 +196,7 @@ pub unsafe fn hlv_b(src: *const i8) -> i8 { /// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.BU` /// instruction which is effectively a dereference to any memory address. 
#[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlv_bu(src: *const u8) -> u8 { let value: u8; asm!(".insn i 0x73, 0x4, {}, {}, 0x601", out(reg) value, in(reg) src, options(readonly, nostack)); @@ -210,7 +212,7 @@ pub unsafe fn hlv_bu(src: *const u8) -> u8 { /// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.H` /// instruction which is effectively a dereference to any memory address. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlv_h(src: *const i16) -> i16 { let value: i16; asm!(".insn i 0x73, 0x4, {}, {}, 0x640", out(reg) value, in(reg) src, options(readonly, nostack)); @@ -226,7 +228,7 @@ pub unsafe fn hlv_h(src: *const i16) -> i16 { /// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.HU` /// instruction which is effectively a dereference to any memory address. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlv_hu(src: *const u16) -> u16 { let value: u16; asm!(".insn i 0x73, 0x4, {}, {}, 0x641", out(reg) value, in(reg) src, options(readonly, nostack)); @@ -242,7 +244,7 @@ pub unsafe fn hlv_hu(src: *const u16) -> u16 { /// This function is unsafe for it accesses the virtual supervisor or user via a `HLVX.HU` /// instruction which is effectively a dereference to any memory address. 
#[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlvx_hu(src: *const u16) -> u16 { let insn: u16; asm!(".insn i 0x73, 0x4, {}, {}, 0x643", out(reg) insn, in(reg) src, options(readonly, nostack)); @@ -258,7 +260,7 @@ pub unsafe fn hlvx_hu(src: *const u16) -> u16 { /// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.W` /// instruction which is effectively a dereference to any memory address. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlv_w(src: *const i32) -> i32 { let value: i32; asm!(".insn i 0x73, 0x4, {}, {}, 0x680", out(reg) value, in(reg) src, options(readonly, nostack)); @@ -274,7 +276,7 @@ pub unsafe fn hlv_w(src: *const i32) -> i32 { /// This function is unsafe for it accesses the virtual supervisor or user via a `HLVX.WU` /// instruction which is effectively a dereference to any memory address. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hlvx_wu(src: *const u32) -> u32 { let insn: u32; asm!(".insn i 0x73, 0x4, {}, {}, 0x683", out(reg) insn, in(reg) src, options(readonly, nostack)); @@ -290,7 +292,7 @@ pub unsafe fn hlvx_wu(src: *const u32) -> u32 { /// This function is unsafe for it accesses the virtual supervisor or user via a `HSV.B` /// instruction which is effectively a dereference to any memory address. 
#[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hsv_b(dst: *mut i8, src: i8) { asm!(".insn r 0x73, 0x4, 0x31, x0, {}, {}", in(reg) dst, in(reg) src, options(nostack)); } @@ -304,7 +306,7 @@ pub unsafe fn hsv_b(dst: *mut i8, src: i8) { /// This function is unsafe for it accesses the virtual supervisor or user via a `HSV.H` /// instruction which is effectively a dereference to any memory address. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hsv_h(dst: *mut i16, src: i16) { asm!(".insn r 0x73, 0x4, 0x33, x0, {}, {}", in(reg) dst, in(reg) src, options(nostack)); } @@ -318,7 +320,7 @@ pub unsafe fn hsv_h(dst: *mut i16, src: i16) { /// This function is unsafe for it accesses the virtual supervisor or user via a `HSV.W` /// instruction which is effectively a dereference to any memory address. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hsv_w(dst: *mut i32, src: i32) { asm!(".insn r 0x73, 0x4, 0x35, x0, {}, {}", in(reg) dst, in(reg) src, options(nostack)); } @@ -332,7 +334,7 @@ pub unsafe fn hsv_w(dst: *mut i32, src: i32) { /// /// This fence specifies a single guest virtual address, and a single guest address-space identifier. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hfence_vvma(vaddr: usize, asid: usize) { // asm!("hfence.vvma {}, {}", in(reg) vaddr, in(reg) asid) asm!(".insn r 0x73, 0, 0x11, x0, {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) @@ -347,7 +349,7 @@ pub unsafe fn hfence_vvma(vaddr: usize, asid: usize) { /// /// This fence specifies a single guest virtual address. 
#[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hfence_vvma_vaddr(vaddr: usize) { asm!(".insn r 0x73, 0, 0x11, x0, {}, x0", in(reg) vaddr, options(nostack)) } @@ -361,7 +363,7 @@ pub unsafe fn hfence_vvma_vaddr(vaddr: usize) { /// /// This fence specifies a single guest address-space identifier. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hfence_vvma_asid(asid: usize) { asm!(".insn r 0x73, 0, 0x11, x0, x0, {}", in(reg) asid, options(nostack)) } @@ -375,7 +377,7 @@ pub unsafe fn hfence_vvma_asid(asid: usize) { /// /// This fence applies to any guest address spaces and guest virtual addresses. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hfence_vvma_all() { asm!(".insn r 0x73, 0, 0x11, x0, x0, x0", options(nostack)) } @@ -388,7 +390,7 @@ pub unsafe fn hfence_vvma_all() { /// This fence specifies a single guest physical address, **shifted right by 2 bits**, and a single virtual machine /// by virtual machine identifier (VMID). #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hfence_gvma(gaddr: usize, vmid: usize) { // asm!("hfence.gvma {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack)) asm!(".insn r 0x73, 0, 0x31, x0, {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack)) @@ -401,7 +403,7 @@ pub unsafe fn hfence_gvma(gaddr: usize, vmid: usize) { /// /// This fence specifies a single guest physical address; **the physical address should be shifted right by 2 bits**. 
#[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hfence_gvma_gaddr(gaddr: usize) { asm!(".insn r 0x73, 0, 0x31, x0, {}, x0", in(reg) gaddr, options(nostack)) } @@ -413,7 +415,7 @@ pub unsafe fn hfence_gvma_gaddr(gaddr: usize) { /// /// This fence specifies a single virtual machine by virtual machine identifier (VMID). #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hfence_gvma_vmid(vmid: usize) { asm!(".insn r 0x73, 0, 0x31, x0, x0, {}", in(reg) vmid, options(nostack)) } @@ -425,7 +427,7 @@ pub unsafe fn hfence_gvma_vmid(vmid: usize) { /// /// This fence specifies all guest physical addresses and all virtual machines. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hfence_gvma_all() { asm!(".insn r 0x73, 0, 0x31, x0, x0, x0", options(nostack)) } @@ -437,7 +439,7 @@ pub unsafe fn hfence_gvma_all() { /// /// This fence specifies a single guest virtual address, and a single guest address-space identifier. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hinval_vvma(vaddr: usize, asid: usize) { // asm!("hinval.vvma {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) asm!(".insn r 0x73, 0, 0x13, x0, {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) @@ -450,7 +452,7 @@ pub unsafe fn hinval_vvma(vaddr: usize, asid: usize) { /// /// This fence specifies a single guest virtual address. 
#[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hinval_vvma_vaddr(vaddr: usize) { asm!(".insn r 0x73, 0, 0x13, x0, {}, x0", in(reg) vaddr, options(nostack)) } @@ -462,7 +464,7 @@ pub unsafe fn hinval_vvma_vaddr(vaddr: usize) { /// /// This fence specifies a single guest address-space identifier. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hinval_vvma_asid(asid: usize) { asm!(".insn r 0x73, 0, 0x13, x0, x0, {}", in(reg) asid, options(nostack)) } @@ -474,7 +476,7 @@ pub unsafe fn hinval_vvma_asid(asid: usize) { /// /// This fence applies to any guest address spaces and guest virtual addresses. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hinval_vvma_all() { asm!(".insn r 0x73, 0, 0x13, x0, x0, x0", options(nostack)) } @@ -487,7 +489,7 @@ pub unsafe fn hinval_vvma_all() { /// This fence specifies a single guest physical address, **shifted right by 2 bits**, and a single virtual machine /// by virtual machine identifier (VMID). #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hinval_gvma(gaddr: usize, vmid: usize) { // asm!("hinval.gvma {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack)) asm!(".insn r 0x73, 0, 0x33, x0, {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack)) @@ -500,7 +502,7 @@ pub unsafe fn hinval_gvma(gaddr: usize, vmid: usize) { /// /// This fence specifies a single guest physical address; **the physical address should be shifted right by 2 bits**. 
#[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hinval_gvma_gaddr(gaddr: usize) { asm!(".insn r 0x73, 0, 0x33, x0, {}, x0", in(reg) gaddr, options(nostack)) } @@ -512,7 +514,7 @@ pub unsafe fn hinval_gvma_gaddr(gaddr: usize) { /// /// This fence specifies a single virtual machine by virtual machine identifier (VMID). #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hinval_gvma_vmid(vmid: usize) { asm!(".insn r 0x73, 0, 0x33, x0, x0, {}", in(reg) vmid, options(nostack)) } @@ -524,7 +526,7 @@ pub unsafe fn hinval_gvma_vmid(vmid: usize) { /// /// This fence specifies all guest physical addresses and all virtual machines. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub unsafe fn hinval_gvma_all() { asm!(".insn r 0x73, 0, 0x33, x0, x0, x0", options(nostack)) } @@ -548,7 +550,7 @@ pub unsafe fn hinval_gvma_all() { /// [`frrm`]: fn.frrm.html /// [`frflags`]: fn.frflags.html #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn frcsr() -> u32 { let value: u32; unsafe { asm!("frcsr {}", out(reg) value, options(nomem, nostack)) }; @@ -560,7 +562,7 @@ pub fn frcsr() -> u32 { /// This function swaps the value in `fcsr` by copying the original value to be returned, /// and then writing a new value obtained from input variable `value` into `fcsr`. 
#[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn fscsr(value: u32) -> u32 { let original: u32; unsafe { asm!("fscsr {}, {}", out(reg) original, in(reg) value, options(nomem, nostack)) } @@ -583,7 +585,7 @@ pub fn fscsr(value: u32) -> u32 { /// | 110 | | _Reserved for future use._ | /// | 111 | DYN | In Rounding Mode register, _reserved_. | #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn frrm() -> u32 { let value: u32; unsafe { asm!("frrm {}", out(reg) value, options(nomem, nostack)) }; @@ -596,7 +598,7 @@ pub fn frrm() -> u32 { /// and then writing a new value obtained from the three least-significant bits of /// input variable `value` into `frm`. #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn fsrm(value: u32) -> u32 { let original: u32; unsafe { asm!("fsrm {}, {}", out(reg) original, in(reg) value, options(nomem, nostack)) } @@ -620,7 +622,7 @@ pub fn fsrm(value: u32) -> u32 { /// | 1 | UF | Underflow | /// | 0 | NX | Inexact | #[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn frflags() -> u32 { let value: u32; unsafe { asm!("frflags {}", out(reg) value, options(nomem, nostack)) }; @@ -633,7 +635,7 @@ pub fn frflags() -> u32 { /// and then writing a new value obtained from the five least-significant bits of /// input variable `value` into `fflags`. 
#[inline] -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn fsflags(value: u32) -> u32 { let original: u32; unsafe { asm!("fsflags {}, {}", out(reg) original, in(reg) value, options(nomem, nostack)) } diff --git a/crates/core_arch/src/riscv_shared/p.rs b/crates/core_arch/src/riscv_shared/p.rs index a26044aeef..ca3c27d1c0 100644 --- a/crates/core_arch/src/riscv_shared/p.rs +++ b/crates/core_arch/src/riscv_shared/p.rs @@ -5,6 +5,7 @@ use crate::arch::asm; /// Adds packed 16-bit signed numbers, discarding overflow bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn add16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -15,6 +16,7 @@ pub fn add16(a: usize, b: usize) -> usize { /// Halves the sum of packed 16-bit signed numbers, dropping least bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn radd16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -25,6 +27,7 @@ pub fn radd16(a: usize, b: usize) -> usize { /// Halves the sum of packed 16-bit unsigned numbers, dropping least bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn uradd16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -35,6 +38,7 @@ pub fn uradd16(a: usize, b: usize) -> usize { /// Adds packed 16-bit signed numbers, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn kadd16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -45,6 +49,7 @@ pub fn kadd16(a: usize, b: usize) -> usize { /// Adds packed 16-bit unsigned numbers, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ukadd16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -55,6 +60,7 @@ pub fn ukadd16(a: usize, b: usize) -> usize { /// Subtracts packed 16-bit signed numbers, discarding 
overflow bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn sub16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -65,6 +71,7 @@ pub fn sub16(a: usize, b: usize) -> usize { /// Halves the subtraction result of packed 16-bit signed numbers, dropping least bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn rsub16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -75,6 +82,7 @@ pub fn rsub16(a: usize, b: usize) -> usize { /// Halves the subtraction result of packed 16-bit unsigned numbers, dropping least bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ursub16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -85,6 +93,7 @@ pub fn ursub16(a: usize, b: usize) -> usize { /// Subtracts packed 16-bit signed numbers, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ksub16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -95,6 +104,7 @@ pub fn ksub16(a: usize, b: usize) -> usize { /// Subtracts packed 16-bit unsigned numbers, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn uksub16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -105,6 +115,7 @@ pub fn uksub16(a: usize, b: usize) -> usize { /// Cross adds and subtracts packed 16-bit signed numbers, discarding overflow bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn cras16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -115,6 +126,7 @@ pub fn cras16(a: usize, b: usize) -> usize { /// Cross halves of adds and subtracts packed 16-bit signed numbers, dropping least bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn rcras16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -125,6 +137,7 @@ pub fn rcras16(a: usize, b: usize) -> usize { 
/// Cross halves of adds and subtracts packed 16-bit unsigned numbers, dropping least bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn urcras16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -135,6 +148,7 @@ pub fn urcras16(a: usize, b: usize) -> usize { /// Cross adds and subtracts packed 16-bit signed numbers, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn kcras16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -145,6 +159,7 @@ pub fn kcras16(a: usize, b: usize) -> usize { /// Cross adds and subtracts packed 16-bit unsigned numbers, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ukcras16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -155,6 +170,7 @@ pub fn ukcras16(a: usize, b: usize) -> usize { /// Cross subtracts and adds packed 16-bit signed numbers, discarding overflow bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn crsa16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -165,6 +181,7 @@ pub fn crsa16(a: usize, b: usize) -> usize { /// Cross halves of subtracts and adds packed 16-bit signed numbers, dropping least bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn rcrsa16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -175,6 +192,7 @@ pub fn rcrsa16(a: usize, b: usize) -> usize { /// Cross halves of subtracts and adds packed 16-bit unsigned numbers, dropping least bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn urcrsa16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -185,6 +203,7 @@ pub fn urcrsa16(a: usize, b: usize) -> usize { /// Cross subtracts and adds packed 16-bit signed numbers, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub 
fn kcrsa16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -195,6 +214,7 @@ pub fn kcrsa16(a: usize, b: usize) -> usize { /// Cross subtracts and adds packed 16-bit unsigned numbers, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ukcrsa16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -205,6 +225,7 @@ pub fn ukcrsa16(a: usize, b: usize) -> usize { /// Straight adds and subtracts packed 16-bit signed numbers, discarding overflow bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn stas16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -215,6 +236,7 @@ pub fn stas16(a: usize, b: usize) -> usize { /// Straight halves of adds and subtracts packed 16-bit signed numbers, dropping least bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn rstas16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -225,6 +247,7 @@ pub fn rstas16(a: usize, b: usize) -> usize { /// Straight halves of adds and subtracts packed 16-bit unsigned numbers, dropping least bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn urstas16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -235,6 +258,7 @@ pub fn urstas16(a: usize, b: usize) -> usize { /// Straight adds and subtracts packed 16-bit signed numbers, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn kstas16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -245,6 +269,7 @@ pub fn kstas16(a: usize, b: usize) -> usize { /// Straight adds and subtracts packed 16-bit unsigned numbers, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ukstas16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -255,6 +280,7 @@ pub fn ukstas16(a: usize, b: usize) -> usize { /// Straight 
subtracts and adds packed 16-bit signed numbers, discarding overflow bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn stsa16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -265,6 +291,7 @@ pub fn stsa16(a: usize, b: usize) -> usize { /// Straight halves of subtracts and adds packed 16-bit signed numbers, dropping least bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn rstsa16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -275,6 +302,7 @@ pub fn rstsa16(a: usize, b: usize) -> usize { /// Straight halves of subtracts and adds packed 16-bit unsigned numbers, dropping least bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn urstsa16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -285,6 +313,7 @@ pub fn urstsa16(a: usize, b: usize) -> usize { /// Straight subtracts and adds packed 16-bit signed numbers, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn kstsa16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -295,6 +324,7 @@ pub fn kstsa16(a: usize, b: usize) -> usize { /// Straight subtracts and adds packed 16-bit unsigned numbers, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ukstsa16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -305,6 +335,7 @@ pub fn ukstsa16(a: usize, b: usize) -> usize { /// Adds packed 8-bit signed numbers, discarding overflow bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn add8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -315,6 +346,7 @@ pub fn add8(a: usize, b: usize) -> usize { /// Halves the sum of packed 8-bit signed numbers, dropping least bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn radd8(a: usize, b: usize) -> usize { let value: 
usize; unsafe { @@ -325,6 +357,7 @@ pub fn radd8(a: usize, b: usize) -> usize { /// Halves the sum of packed 8-bit unsigned numbers, dropping least bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn uradd8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -335,6 +368,7 @@ pub fn uradd8(a: usize, b: usize) -> usize { /// Adds packed 8-bit signed numbers, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn kadd8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -345,6 +379,7 @@ pub fn kadd8(a: usize, b: usize) -> usize { /// Adds packed 8-bit unsigned numbers, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ukadd8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -355,6 +390,7 @@ pub fn ukadd8(a: usize, b: usize) -> usize { /// Subtracts packed 8-bit signed numbers, discarding overflow bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn sub8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -365,6 +401,7 @@ pub fn sub8(a: usize, b: usize) -> usize { /// Halves the subtraction result of packed 8-bit signed numbers, dropping least bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn rsub8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -375,6 +412,7 @@ pub fn rsub8(a: usize, b: usize) -> usize { /// Halves the subtraction result of packed 8-bit unsigned numbers, dropping least bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ursub8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -385,6 +423,7 @@ pub fn ursub8(a: usize, b: usize) -> usize { /// Subtracts packed 8-bit signed numbers, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ksub8(a: usize, b: usize) -> usize { 
let value: usize; unsafe { @@ -395,6 +434,7 @@ pub fn ksub8(a: usize, b: usize) -> usize { /// Subtracts packed 8-bit unsigned numbers, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn uksub8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -405,6 +445,7 @@ pub fn uksub8(a: usize, b: usize) -> usize { /// Arithmetic right shift packed 16-bit elements without rounding up #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn sra16(a: usize, b: u32) -> usize { let value: usize; unsafe { @@ -415,6 +456,7 @@ pub fn sra16(a: usize, b: u32) -> usize { /// Arithmetic right shift packed 16-bit elements with rounding up #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn sra16u(a: usize, b: u32) -> usize { let value: usize; unsafe { @@ -425,6 +467,7 @@ pub fn sra16u(a: usize, b: u32) -> usize { /// Logical right shift packed 16-bit elements without rounding up #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn srl16(a: usize, b: u32) -> usize { let value: usize; unsafe { @@ -435,6 +478,7 @@ pub fn srl16(a: usize, b: u32) -> usize { /// Logical right shift packed 16-bit elements with rounding up #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn srl16u(a: usize, b: u32) -> usize { let value: usize; unsafe { @@ -445,6 +489,7 @@ pub fn srl16u(a: usize, b: u32) -> usize { /// Logical left shift packed 16-bit elements, discarding overflow bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn sll16(a: usize, b: u32) -> usize { let value: usize; unsafe { @@ -455,6 +500,7 @@ pub fn sll16(a: usize, b: u32) -> usize { /// Logical left shift packed 16-bit elements, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ksll16(a: usize, b: u32) -> usize { let value: usize; unsafe { @@ -465,6 +511,7 @@ 
pub fn ksll16(a: usize, b: u32) -> usize { /// Logical saturating left then arithmetic right shift packed 16-bit elements #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn kslra16(a: usize, b: i32) -> usize { let value: usize; unsafe { @@ -475,6 +522,7 @@ pub fn kslra16(a: usize, b: i32) -> usize { /// Logical saturating left then arithmetic right shift packed 16-bit elements #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn kslra16u(a: usize, b: i32) -> usize { let value: usize; unsafe { @@ -485,6 +533,7 @@ pub fn kslra16u(a: usize, b: i32) -> usize { /// Arithmetic right shift packed 8-bit elements without rounding up #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn sra8(a: usize, b: u32) -> usize { let value: usize; unsafe { @@ -495,6 +544,7 @@ pub fn sra8(a: usize, b: u32) -> usize { /// Arithmetic right shift packed 8-bit elements with rounding up #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn sra8u(a: usize, b: u32) -> usize { let value: usize; unsafe { @@ -505,6 +555,7 @@ pub fn sra8u(a: usize, b: u32) -> usize { /// Logical right shift packed 8-bit elements without rounding up #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn srl8(a: usize, b: u32) -> usize { let value: usize; unsafe { @@ -515,6 +566,7 @@ pub fn srl8(a: usize, b: u32) -> usize { /// Logical right shift packed 8-bit elements with rounding up #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn srl8u(a: usize, b: u32) -> usize { let value: usize; unsafe { @@ -525,6 +577,7 @@ pub fn srl8u(a: usize, b: u32) -> usize { /// Logical left shift packed 8-bit elements, discarding overflow bits #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn sll8(a: usize, b: u32) -> usize { let value: usize; unsafe { @@ -535,6 +588,7 @@ pub fn sll8(a: usize, b: u32) -> usize { /// Logical left 
shift packed 8-bit elements, saturating at the numeric bounds #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ksll8(a: usize, b: u32) -> usize { let value: usize; unsafe { @@ -545,6 +599,7 @@ pub fn ksll8(a: usize, b: u32) -> usize { /// Logical saturating left then arithmetic right shift packed 8-bit elements #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn kslra8(a: usize, b: i32) -> usize { let value: usize; unsafe { @@ -555,6 +610,7 @@ pub fn kslra8(a: usize, b: i32) -> usize { /// Logical saturating left then arithmetic right shift packed 8-bit elements #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn kslra8u(a: usize, b: i32) -> usize { let value: usize; unsafe { @@ -565,6 +621,7 @@ pub fn kslra8u(a: usize, b: i32) -> usize { /// Compare equality for packed 16-bit elements #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn cmpeq16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -575,6 +632,7 @@ pub fn cmpeq16(a: usize, b: usize) -> usize { /// Compare whether 16-bit packed signed integers are less than the others #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn scmplt16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -585,6 +643,7 @@ pub fn scmplt16(a: usize, b: usize) -> usize { /// Compare whether 16-bit packed signed integers are less than or equal to the others #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn scmple16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -595,6 +654,7 @@ pub fn scmple16(a: usize, b: usize) -> usize { /// Compare whether 16-bit packed unsigned integers are less than the others #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ucmplt16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -605,6 +665,7 @@ pub fn ucmplt16(a: usize, b: usize) -> usize { /// 
Compare whether 16-bit packed unsigned integers are less than or equal to the others #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ucmple16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -615,6 +676,7 @@ pub fn ucmple16(a: usize, b: usize) -> usize { /// Compare equality for packed 8-bit elements #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn cmpeq8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -625,6 +687,7 @@ pub fn cmpeq8(a: usize, b: usize) -> usize { /// Compare whether 8-bit packed signed integers are less than the others #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn scmplt8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -635,6 +698,7 @@ pub fn scmplt8(a: usize, b: usize) -> usize { /// Compare whether 8-bit packed signed integers are less than or equal to the others #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn scmple8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -645,6 +709,7 @@ pub fn scmple8(a: usize, b: usize) -> usize { /// Compare whether 8-bit packed unsigned integers are less than the others #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ucmplt8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -655,6 +720,7 @@ pub fn ucmplt8(a: usize, b: usize) -> usize { /// Compare whether 8-bit packed unsigned integers are less than or equal to the others #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ucmple8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -665,6 +731,7 @@ pub fn ucmple8(a: usize, b: usize) -> usize { /// Get minimum values from 16-bit packed signed integers #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn smin16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -675,6 +742,7 @@ pub fn smin16(a: usize, b: usize) 
-> usize { /// Get minimum values from 16-bit packed unsigned integers #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn umin16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -685,6 +753,7 @@ pub fn umin16(a: usize, b: usize) -> usize { /// Get maximum values from 16-bit packed signed integers #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn smax16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -695,6 +764,7 @@ pub fn smax16(a: usize, b: usize) -> usize { /// Get maximum values from 16-bit packed unsigned integers #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn umax16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -707,6 +777,7 @@ pub fn umax16(a: usize, b: usize) -> usize { /// Compute the absolute value of packed 16-bit signed integers #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn kabs16(a: usize) -> usize { let value: usize; unsafe { @@ -717,6 +788,7 @@ pub fn kabs16(a: usize) -> usize { /// Count the number of redundant sign bits of the packed 16-bit elements #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn clrs16(a: usize) -> usize { let value: usize; unsafe { @@ -727,6 +799,7 @@ pub fn clrs16(a: usize) -> usize { /// Count the number of leading zero bits of the packed 16-bit elements #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn clz16(a: usize) -> usize { let value: usize; unsafe { @@ -737,6 +810,7 @@ pub fn clz16(a: usize) -> usize { /// Swap the 16-bit halfwords within each 32-bit word of a register #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn swap16(a: usize) -> usize { let value: usize; // this instruction is an alias for `pkbt rd, rs1, rs1`. 
@@ -748,6 +822,7 @@ pub fn swap16(a: usize) -> usize { /// Get minimum values from 8-bit packed signed integers #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn smin8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -758,6 +833,7 @@ pub fn smin8(a: usize, b: usize) -> usize { /// Get minimum values from 8-bit packed unsigned integers #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn umin8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -768,6 +844,7 @@ pub fn umin8(a: usize, b: usize) -> usize { /// Get maximum values from 8-bit packed signed integers #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn smax8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -778,6 +855,7 @@ pub fn smax8(a: usize, b: usize) -> usize { /// Get maximum values from 8-bit packed unsigned integers #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn umax8(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -790,6 +868,7 @@ pub fn umax8(a: usize, b: usize) -> usize { /// Compute the absolute value of packed 8-bit signed integers #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn kabs8(a: usize) -> usize { let value: usize; unsafe { @@ -800,6 +879,7 @@ pub fn kabs8(a: usize) -> usize { /// Count the number of redundant sign bits of the packed 8-bit elements #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn clrs8(a: usize) -> usize { let value: usize; unsafe { @@ -810,6 +890,7 @@ pub fn clrs8(a: usize) -> usize { /// Count the number of leading zero bits of the packed 8-bit elements #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn clz8(a: usize) -> usize { let value: usize; unsafe { @@ -820,6 +901,7 @@ pub fn clz8(a: usize) -> usize { /// Swap the 8-bit bytes within each 16-bit halfword of a register. 
#[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn swap8(a: usize) -> usize { let value: usize; unsafe { @@ -830,6 +912,7 @@ pub fn swap8(a: usize) -> usize { /// Unpack first and zeroth into two 16-bit signed halfwords in each 32-bit chunk #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn sunpkd810(a: usize) -> usize { let value: usize; unsafe { @@ -840,6 +923,7 @@ pub fn sunpkd810(a: usize) -> usize { /// Unpack second and zeroth into two 16-bit signed halfwords in each 32-bit chunk #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn sunpkd820(a: usize) -> usize { let value: usize; unsafe { @@ -850,6 +934,7 @@ pub fn sunpkd820(a: usize) -> usize { /// Unpack third and zeroth into two 16-bit signed halfwords in each 32-bit chunk #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn sunpkd830(a: usize) -> usize { let value: usize; unsafe { @@ -860,6 +945,7 @@ pub fn sunpkd830(a: usize) -> usize { /// Unpack third and first into two 16-bit signed halfwords in each 32-bit chunk #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn sunpkd831(a: usize) -> usize { let value: usize; unsafe { @@ -870,6 +956,7 @@ pub fn sunpkd831(a: usize) -> usize { /// Unpack third and second into two 16-bit signed halfwords in each 32-bit chunk #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn sunpkd832(a: usize) -> usize { let value: usize; unsafe { @@ -880,6 +967,7 @@ pub fn sunpkd832(a: usize) -> usize { /// Unpack first and zeroth into two 16-bit unsigned halfwords in each 32-bit chunk #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn zunpkd810(a: usize) -> usize { let value: usize; unsafe { @@ -890,6 +978,7 @@ pub fn zunpkd810(a: usize) -> usize { /// Unpack second and zeroth into two 16-bit unsigned halfwords in each 32-bit chunk #[inline] +#[unstable(feature = 
"riscv_ext_intrinsics", issue = "114544")] pub fn zunpkd820(a: usize) -> usize { let value: usize; unsafe { @@ -900,6 +989,7 @@ pub fn zunpkd820(a: usize) -> usize { /// Unpack third and zeroth into two 16-bit unsigned halfwords in each 32-bit chunk #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn zunpkd830(a: usize) -> usize { let value: usize; unsafe { @@ -910,6 +1000,7 @@ pub fn zunpkd830(a: usize) -> usize { /// Unpack third and first into two 16-bit unsigned halfwords in each 32-bit chunk #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn zunpkd831(a: usize) -> usize { let value: usize; unsafe { @@ -920,6 +1011,7 @@ pub fn zunpkd831(a: usize) -> usize { /// Unpack third and second into two 16-bit unsigned halfwords in each 32-bit chunk #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn zunpkd832(a: usize) -> usize { let value: usize; unsafe { @@ -932,6 +1024,7 @@ pub fn zunpkd832(a: usize) -> usize { /// Pack two 16-bit data from bottom and top half from 32-bit chunks #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn pkbt16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -942,6 +1035,7 @@ pub fn pkbt16(a: usize, b: usize) -> usize { /// Pack two 16-bit data from top and bottom half from 32-bit chunks #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn pktb16(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -952,6 +1046,7 @@ pub fn pktb16(a: usize, b: usize) -> usize { /// Count the number of redundant sign bits of the packed 32-bit elements #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn clrs32(a: usize) -> usize { let value: usize; unsafe { @@ -962,6 +1057,7 @@ pub fn clrs32(a: usize) -> usize { /// Count the number of leading zero bits of the packed 32-bit elements #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn 
clz32(a: usize) -> usize { let value: usize; unsafe { @@ -972,6 +1068,7 @@ pub fn clz32(a: usize) -> usize { /// Calculate the sum of absolute difference of unsigned 8-bit data elements #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn pbsad(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -982,6 +1079,7 @@ pub fn pbsad(a: usize, b: usize) -> usize { /// Calculate and accumulate the sum of absolute difference of unsigned 8-bit data elements #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn pbsada(t: usize, a: usize, b: usize) -> usize { let mut value: usize; unsafe { @@ -992,6 +1090,7 @@ pub fn pbsada(t: usize, a: usize, b: usize) -> usize { /// Multiply signed 8-bit elements and add 16-bit elements on results for packed 32-bit chunks #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn smaqa(t: usize, a: usize, b: usize) -> usize { let mut value: usize; unsafe { @@ -1002,6 +1101,7 @@ pub fn smaqa(t: usize, a: usize, b: usize) -> usize { /// Multiply unsigned 8-bit elements and add 16-bit elements on results for packed 32-bit chunks #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn umaqa(t: usize, a: usize, b: usize) -> usize { let mut value: usize; unsafe { @@ -1012,6 +1112,7 @@ pub fn umaqa(t: usize, a: usize, b: usize) -> usize { /// Multiply signed to unsigned 8-bit and add 16-bit elements on results for packed 32-bit chunks #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn smaqasu(t: usize, a: usize, b: usize) -> usize { let mut value: usize; unsafe { @@ -1022,6 +1123,7 @@ pub fn smaqasu(t: usize, a: usize, b: usize) -> usize { /// Adds signed lower 16-bit content of two registers with Q15 saturation #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn kaddh(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -1032,6 +1134,7 @@ pub fn kaddh(a: usize, b: 
usize) -> usize { /// Subtracts signed lower 16-bit content of two registers with Q15 saturation #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ksubh(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -1042,6 +1145,7 @@ pub fn ksubh(a: usize, b: usize) -> usize { /// Adds signed lower 16-bit content of two registers with U16 saturation #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn ukaddh(a: usize, b: usize) -> usize { let value: usize; unsafe { @@ -1052,6 +1156,7 @@ pub fn ukaddh(a: usize, b: usize) -> usize { /// Subtracts signed lower 16-bit content of two registers with U16 saturation #[inline] +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] pub fn uksubh(a: usize, b: usize) -> usize { let value: usize; unsafe { diff --git a/crates/core_arch/src/riscv_shared/zb.rs b/crates/core_arch/src/riscv_shared/zb.rs index 6785c04fd5..841b707989 100644 --- a/crates/core_arch/src/riscv_shared/zb.rs +++ b/crates/core_arch/src/riscv_shared/zb.rs @@ -46,6 +46,7 @@ extern "unadjusted" { /// # Safety /// /// This function is safe to use if the `zbb` target feature is present. +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] #[target_feature(enable = "zbb")] #[cfg_attr(test, assert_instr(orc.b))] #[inline] @@ -74,6 +75,7 @@ pub unsafe fn orc_b(rs: usize) -> usize { /// # Safety /// /// This function is safe to use if the `zbc` target feature is present. +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] #[target_feature(enable = "zbc")] #[cfg_attr(test, assert_instr(clmul))] #[inline] @@ -102,6 +104,7 @@ pub unsafe fn clmul(rs1: usize, rs2: usize) -> usize { /// # Safety /// /// This function is safe to use if the `zbc` target feature is present. 
+#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] #[target_feature(enable = "zbc")] #[cfg_attr(test, assert_instr(clmulh))] #[inline] @@ -130,6 +133,7 @@ pub unsafe fn clmulh(rs1: usize, rs2: usize) -> usize { /// # Safety /// /// This function is safe to use if the `zbc` target feature is present. +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] #[target_feature(enable = "zbc")] #[cfg_attr(test, assert_instr(clmulr))] #[inline] diff --git a/crates/core_arch/src/riscv_shared/zk.rs b/crates/core_arch/src/riscv_shared/zk.rs index 5fc5b4cdaf..2d0e8602f4 100644 --- a/crates/core_arch/src/riscv_shared/zk.rs +++ b/crates/core_arch/src/riscv_shared/zk.rs @@ -61,6 +61,7 @@ extern "unadjusted" { /// # Safety /// /// This function is safe to use if the `zbkx` target feature is present. +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] #[target_feature(enable = "zbkx")] #[cfg_attr(test, assert_instr(xperm8))] #[inline] @@ -92,6 +93,7 @@ pub unsafe fn xperm8(rs1: usize, rs2: usize) -> usize { /// # Safety /// /// This function is safe to use if the `zbkx` target feature is present. +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] #[target_feature(enable = "zbkx")] #[cfg_attr(test, assert_instr(xperm4))] #[inline] @@ -126,6 +128,7 @@ pub unsafe fn xperm4(rs1: usize, rs2: usize) -> usize { /// # Safety /// /// This function is safe to use if the `zknh` target feature is present. +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] #[target_feature(enable = "zknh")] #[cfg_attr(test, assert_instr(sha256sig0))] #[inline] @@ -152,6 +155,7 @@ pub unsafe fn sha256sig0(rs1: u32) -> u32 { /// # Safety /// /// This function is safe to use if the `zknh` target feature is present. 
+#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] #[target_feature(enable = "zknh")] #[cfg_attr(test, assert_instr(sha256sig1))] #[inline] @@ -178,6 +182,7 @@ pub unsafe fn sha256sig1(rs1: u32) -> u32 { /// # Safety /// /// This function is safe to use if the `zknh` target feature is present. +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] #[target_feature(enable = "zknh")] #[cfg_attr(test, assert_instr(sha256sum0))] #[inline] @@ -204,6 +209,7 @@ pub unsafe fn sha256sum0(rs1: u32) -> u32 { /// # Safety /// /// This function is safe to use if the `zknh` target feature is present. +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] #[target_feature(enable = "zknh")] #[cfg_attr(test, assert_instr(sha256sum1))] #[inline] @@ -280,6 +286,7 @@ pub unsafe fn sha256sum1(rs1: u32) -> u32 { /// return c3; // c3 represents c[0..=3] /// # } /// ``` +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] #[target_feature(enable = "zksed")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(sm4ed, BS = 0))] @@ -359,6 +366,7 @@ pub unsafe fn sm4ed(rs1: u32, rs2: u32) -> u32 { /// return c3; // c3 represents c[0..=3] /// # } /// ``` +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] #[target_feature(enable = "zksed")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(sm4ks, BS = 0))] @@ -400,6 +408,7 @@ pub unsafe fn sm4ks(rs1: u32, rs2: u32) -> u32 { /// In the SM3 algorithm, the `P0` transformation is used as `E ← P0(TT2)` when the /// compression function `CF` uses the intermediate value `TT2` to calculate /// the variable `E` in one iteration for subsequent processes. 
+#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] #[target_feature(enable = "zksh")] #[cfg_attr(test, assert_instr(sm3p0))] #[inline] @@ -444,6 +453,7 @@ pub unsafe fn sm3p0(rs1: u32) -> u32 { /// Wj ← P1(Wj−16 ⊕ Wj−9 ⊕ (Wj−3 ≪ 15)) ⊕ (Wj−13 ≪ 7) ⊕ Wj−6 /// ENDFOR /// ``` +#[unstable(feature = "riscv_ext_intrinsics", issue = "114544")] #[target_feature(enable = "zksh")] #[cfg_attr(test, assert_instr(sm3p1))] #[inline] diff --git a/crates/core_arch/src/wasm32/atomic.rs b/crates/core_arch/src/wasm32/atomic.rs index 52d4bea872..0d5aba2f12 100644 --- a/crates/core_arch/src/wasm32/atomic.rs +++ b/crates/core_arch/src/wasm32/atomic.rs @@ -36,6 +36,7 @@ extern "C" { #[cfg_attr(test, assert_instr(memory.atomic.wait32))] #[target_feature(enable = "atomics")] #[doc(alias("memory.atomic.wait32"))] +#[unstable(feature = "stdarch_wasm_atomic_wait", issue = "77839")] pub unsafe fn memory_atomic_wait32(ptr: *mut i32, expression: i32, timeout_ns: i64) -> i32 { llvm_atomic_wait_i32(ptr, expression, timeout_ns) } @@ -66,6 +67,7 @@ pub unsafe fn memory_atomic_wait32(ptr: *mut i32, expression: i32, timeout_ns: i #[cfg_attr(test, assert_instr(memory.atomic.wait64))] #[target_feature(enable = "atomics")] #[doc(alias("memory.atomic.wait64"))] +#[unstable(feature = "stdarch_wasm_atomic_wait", issue = "77839")] pub unsafe fn memory_atomic_wait64(ptr: *mut i64, expression: i64, timeout_ns: i64) -> i32 { llvm_atomic_wait_i64(ptr, expression, timeout_ns) } @@ -88,6 +90,7 @@ pub unsafe fn memory_atomic_wait64(ptr: *mut i64, expression: i64, timeout_ns: i #[cfg_attr(test, assert_instr(memory.atomic.notify))] #[target_feature(enable = "atomics")] #[doc(alias("memory.atomic.notify"))] +#[unstable(feature = "stdarch_wasm_atomic_wait", issue = "77839")] pub unsafe fn memory_atomic_notify(ptr: *mut i32, waiters: u32) -> u32 { llvm_atomic_notify(ptr, waiters as i32) as u32 } diff --git a/crates/core_arch/src/wasm32/mod.rs b/crates/core_arch/src/wasm32/mod.rs index 
ed5fc01ddf..e07b361e64 100644 --- a/crates/core_arch/src/wasm32/mod.rs +++ b/crates/core_arch/src/wasm32/mod.rs @@ -4,15 +4,19 @@ use stdarch_test::assert_instr; mod atomic; +#[unstable(feature = "stdarch_wasm_atomic_wait", issue = "77839")] pub use self::atomic::*; mod simd128; +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use self::simd128::*; mod relaxed_simd; +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub use self::relaxed_simd::*; mod memory; +#[stable(feature = "simd_wasm32", since = "1.33.0")] pub use self::memory::*; /// Generates the [`unreachable`] instruction, which causes an unconditional [trap]. diff --git a/crates/core_arch/src/wasm32/relaxed_simd.rs b/crates/core_arch/src/wasm32/relaxed_simd.rs index 403fc79d01..91c128f28b 100644 --- a/crates/core_arch/src/wasm32/relaxed_simd.rs +++ b/crates/core_arch/src/wasm32/relaxed_simd.rs @@ -68,6 +68,7 @@ extern "C" { #[cfg_attr(test, assert_instr(i8x16.relaxed_swizzle))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("i8x16.relaxed_swizzle"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn i8x16_relaxed_swizzle(a: v128, s: v128) -> v128 { unsafe { llvm_relaxed_swizzle(a.as_i8x16(), s.as_i8x16()).v128() } } @@ -81,6 +82,7 @@ pub fn i8x16_relaxed_swizzle(a: v128, s: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.relaxed_trunc_f32x4_s))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("i32x4.relaxed_trunc_f32x4_s"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn i32x4_relaxed_trunc_f32x4(a: v128) -> v128 { unsafe { llvm_relaxed_trunc_signed(a.as_f32x4()).v128() } } @@ -94,6 +96,7 @@ pub fn i32x4_relaxed_trunc_f32x4(a: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.relaxed_trunc_f32x4_u))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("i32x4.relaxed_trunc_f32x4_u"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn u32x4_relaxed_trunc_f32x4(a: v128) -> 
v128 { unsafe { llvm_relaxed_trunc_unsigned(a.as_f32x4()).v128() } } @@ -107,6 +110,7 @@ pub fn u32x4_relaxed_trunc_f32x4(a: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.relaxed_trunc_f64x2_s_zero))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("i32x4.relaxed_trunc_f64x2_s_zero"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn i32x4_relaxed_trunc_f64x2_zero(a: v128) -> v128 { unsafe { llvm_relaxed_trunc_signed_zero(a.as_f64x2()).v128() } } @@ -120,6 +124,7 @@ pub fn i32x4_relaxed_trunc_f64x2_zero(a: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.relaxed_trunc_f64x2_u_zero))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("i32x4.relaxed_trunc_f64x2_u_zero"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn u32x4_relaxed_trunc_f64x2_zero(a: v128) -> v128 { unsafe { llvm_relaxed_trunc_unsigned_zero(a.as_f64x2()).v128() } } @@ -129,6 +134,7 @@ pub fn u32x4_relaxed_trunc_f64x2_zero(a: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.relaxed_madd))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("f32x4.relaxed_madd"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn f32x4_relaxed_madd(a: v128, b: v128, c: v128) -> v128 { unsafe { llvm_f32x4_fma(a.as_f32x4(), b.as_f32x4(), c.as_f32x4()).v128() } } @@ -138,6 +144,7 @@ pub fn f32x4_relaxed_madd(a: v128, b: v128, c: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.relaxed_nmadd))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("f32x4.relaxed_nmadd"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn f32x4_relaxed_nmadd(a: v128, b: v128, c: v128) -> v128 { unsafe { llvm_f32x4_fms(a.as_f32x4(), b.as_f32x4(), c.as_f32x4()).v128() } } @@ -147,6 +154,7 @@ pub fn f32x4_relaxed_nmadd(a: v128, b: v128, c: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.relaxed_madd))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("f64x2.relaxed_madd"))] 
+#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn f64x2_relaxed_madd(a: v128, b: v128, c: v128) -> v128 { unsafe { llvm_f64x2_fma(a.as_f64x2(), b.as_f64x2(), c.as_f64x2()).v128() } } @@ -156,6 +164,7 @@ pub fn f64x2_relaxed_madd(a: v128, b: v128, c: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.relaxed_nmadd))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("f64x2.relaxed_nmadd"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn f64x2_relaxed_nmadd(a: v128, b: v128, c: v128) -> v128 { unsafe { llvm_f64x2_fms(a.as_f64x2(), b.as_f64x2(), c.as_f64x2()).v128() } } @@ -171,6 +180,7 @@ pub fn f64x2_relaxed_nmadd(a: v128, b: v128, c: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.relaxed_laneselect))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("i8x16.relaxed_laneselect"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn i8x16_relaxed_laneselect(a: v128, b: v128, m: v128) -> v128 { unsafe { llvm_i8x16_laneselect(a.as_i8x16(), b.as_i8x16(), m.as_i8x16()).v128() } } @@ -186,6 +196,7 @@ pub fn i8x16_relaxed_laneselect(a: v128, b: v128, m: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.relaxed_laneselect))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("i16x8.relaxed_laneselect"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn i16x8_relaxed_laneselect(a: v128, b: v128, m: v128) -> v128 { unsafe { llvm_i16x8_laneselect(a.as_i16x8(), b.as_i16x8(), m.as_i16x8()).v128() } } @@ -201,6 +212,7 @@ pub fn i16x8_relaxed_laneselect(a: v128, b: v128, m: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.relaxed_laneselect))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("i32x4.relaxed_laneselect"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn i32x4_relaxed_laneselect(a: v128, b: v128, m: v128) -> v128 { unsafe { llvm_i32x4_laneselect(a.as_i32x4(), b.as_i32x4(), m.as_i32x4()).v128() } } @@ 
-216,6 +228,7 @@ pub fn i32x4_relaxed_laneselect(a: v128, b: v128, m: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.relaxed_laneselect))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("i64x2.relaxed_laneselect"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn i64x2_relaxed_laneselect(a: v128, b: v128, m: v128) -> v128 { unsafe { llvm_i64x2_laneselect(a.as_i64x2(), b.as_i64x2(), m.as_i64x2()).v128() } } @@ -226,6 +239,7 @@ pub fn i64x2_relaxed_laneselect(a: v128, b: v128, m: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.relaxed_min))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("f32x4.relaxed_min"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn f32x4_relaxed_min(a: v128, b: v128) -> v128 { unsafe { llvm_f32x4_relaxed_min(a.as_f32x4(), b.as_f32x4()).v128() } } @@ -236,6 +250,7 @@ pub fn f32x4_relaxed_min(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.relaxed_max))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("f32x4.relaxed_max"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn f32x4_relaxed_max(a: v128, b: v128) -> v128 { unsafe { llvm_f32x4_relaxed_max(a.as_f32x4(), b.as_f32x4()).v128() } } @@ -246,6 +261,7 @@ pub fn f32x4_relaxed_max(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.relaxed_min))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("f64x2.relaxed_min"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn f64x2_relaxed_min(a: v128, b: v128) -> v128 { unsafe { llvm_f64x2_relaxed_min(a.as_f64x2(), b.as_f64x2()).v128() } } @@ -256,6 +272,7 @@ pub fn f64x2_relaxed_min(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.relaxed_max))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("f64x2.relaxed_max"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn f64x2_relaxed_max(a: v128, b: v128) -> v128 { unsafe { 
llvm_f64x2_relaxed_max(a.as_f64x2(), b.as_f64x2()).v128() } } @@ -266,6 +283,7 @@ pub fn f64x2_relaxed_max(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.relaxed_q15mulr_s))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("i16x8.relaxed_q15mulr_s"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn i16x8_relaxed_q15mulr(a: v128, b: v128) -> v128 { unsafe { llvm_relaxed_q15mulr_signed(a.as_i16x8(), b.as_i16x8()).v128() } } @@ -285,6 +303,7 @@ pub fn i16x8_relaxed_q15mulr(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.relaxed_dot_i8x16_i7x16_s))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("i16x8.relaxed_dot_i8x16_i7x16_s"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn i16x8_relaxed_dot_i8x16_i7x16(a: v128, b: v128) -> v128 { unsafe { llvm_i16x8_relaxed_dot_i8x16_i7x16_s(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -296,6 +315,7 @@ pub fn i16x8_relaxed_dot_i8x16_i7x16(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.relaxed_dot_i8x16_i7x16_add_s))] #[target_feature(enable = "relaxed-simd")] #[doc(alias("i32x4.relaxed_dot_i8x16_i7x16_add_s"))] +#[unstable(feature = "stdarch_wasm_relaxed_simd", issue = "111196")] pub fn i32x4_relaxed_dot_i8x16_i7x16_add(a: v128, b: v128, c: v128) -> v128 { unsafe { llvm_i32x4_relaxed_dot_i8x16_i7x16_add_s(a.as_i8x16(), b.as_i8x16(), c.as_i32x4()).v128() diff --git a/crates/core_arch/src/x86/avx512bf16.rs b/crates/core_arch/src/x86/avx512bf16.rs index b21ededabc..bd4ee45182 100644 --- a/crates/core_arch/src/x86/avx512bf16.rs +++ b/crates/core_arch/src/x86/avx512bf16.rs @@ -36,6 +36,7 @@ extern "C" { /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651&avx512techs=AVX512_BF16&text=_mm_cvtne2ps_pbh) #[inline] #[target_feature(enable = "avx512bf16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr("vcvtne2ps2bf16"))] pub unsafe fn _mm_cvtne2ps_pbh(a: __m128, b: __m128) -> __m128bh { transmute(cvtne2ps2bf16(a.as_f32x4(), b.as_f32x4())) @@ -48,6 +49,7 @@ pub unsafe fn _mm_cvtne2ps_pbh(a: __m128, b: __m128) -> __m128bh { /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651&avx512techs=AVX512_BF16&text=_mm_mask_cvtne2ps_pbh) #[inline] #[target_feature(enable = "avx512bf16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vcvtne2ps2bf16"))] pub unsafe fn _mm_mask_cvtne2ps_pbh(src: __m128bh, k: __mmask8, a: __m128, b: __m128) -> __m128bh { let cvt = _mm_cvtne2ps_pbh(a, b).as_u16x8(); @@ -61,6 +63,7 @@ pub unsafe fn _mm_mask_cvtne2ps_pbh(src: __m128bh, k: __mmask8, a: __m128, b: __ /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651&avx512techs=AVX512_BF16&text=_mm_maskz_cvtne2ps_pbh) #[inline] #[target_feature(enable = "avx512bf16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vcvtne2ps2bf16"))] pub unsafe fn _mm_maskz_cvtne2ps_pbh(k: __mmask8, a: __m128, b: __m128) -> __m128bh { let cvt = _mm_cvtne2ps_pbh(a, b).as_u16x8(); @@ -74,6 +77,7 @@ pub unsafe fn _mm_maskz_cvtne2ps_pbh(k: __mmask8, a: __m128, b: __m128) -> __m12 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654&avx512techs=AVX512_BF16&text=_mm256_cvtne2ps_pbh) #[inline] #[target_feature(enable = "avx512bf16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vcvtne2ps2bf16"))] pub unsafe fn _mm256_cvtne2ps_pbh(a: __m256, b: __m256) -> __m256bh { transmute(cvtne2ps2bf16_256(a.as_f32x8(), b.as_f32x8())) @@ -85,6 +89,7 @@ pub unsafe fn _mm256_cvtne2ps_pbh(a: __m256, b: __m256) -> __m256bh { /// [Intel's 
documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654&avx512techs=AVX512_BF16&text=_mm256_mask_cvtne2ps_pbh) #[inline] #[target_feature(enable = "avx512bf16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vcvtne2ps2bf16"))] pub unsafe fn _mm256_mask_cvtne2ps_pbh( src: __m256bh, @@ -102,6 +107,7 @@ pub unsafe fn _mm256_mask_cvtne2ps_pbh( /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654&avx512techs=AVX512_BF16&text=_mm256_maskz_cvtne2ps_pbh) #[inline] #[target_feature(enable = "avx512bf16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vcvtne2ps2bf16"))] pub unsafe fn _mm256_maskz_cvtne2ps_pbh(k: __mmask16, a: __m256, b: __m256) -> __m256bh { let cvt = _mm256_cvtne2ps_pbh(a, b).as_u16x16(); @@ -115,6 +121,7 @@ pub unsafe fn _mm256_maskz_cvtne2ps_pbh(k: __mmask16, a: __m256, b: __m256) -> _ /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657&avx512techs=AVX512_BF16&text=_mm512_cvtne2ps_pbh) #[inline] #[target_feature(enable = "avx512bf16,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vcvtne2ps2bf16"))] pub unsafe fn _mm512_cvtne2ps_pbh(a: __m512, b: __m512) -> __m512bh { transmute(cvtne2ps2bf16_512(a.as_f32x16(), b.as_f32x16())) @@ -127,6 +134,7 @@ pub unsafe fn _mm512_cvtne2ps_pbh(a: __m512, b: __m512) -> __m512bh { /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657&avx512techs=AVX512_BF16&text=_mm512_mask_cvtne2ps_pbh) #[inline] #[target_feature(enable = "avx512bf16,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vcvtne2ps2bf16"))] pub unsafe fn _mm512_mask_cvtne2ps_pbh( src: __m512bh, @@ -145,6 +153,7 @@ 
pub unsafe fn _mm512_mask_cvtne2ps_pbh( /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657&avx512techs=AVX512_BF16&text=_mm512_maskz_cvtne2ps_pbh) #[inline] #[target_feature(enable = "avx512bf16,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vcvtne2ps2bf16"))] pub unsafe fn _mm512_maskz_cvtne2ps_pbh(k: __mmask32, a: __m512, b: __m512) -> __m512bh { let cvt = _mm512_cvtne2ps_pbh(a, b).as_u16x32(); @@ -157,6 +166,7 @@ pub unsafe fn _mm512_maskz_cvtne2ps_pbh(k: __mmask32, a: __m512, b: __m512) -> _ /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657,1660&avx512techs=AVX512_BF16&text=_mm256_cvtneps_pbh) #[inline] #[target_feature(enable = "avx512bf16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vcvtneps2bf16"))] pub unsafe fn _mm256_cvtneps_pbh(a: __m256) -> __m128bh { transmute(cvtneps2bf16_256(a.as_f32x8())) @@ -168,6 +178,7 @@ pub unsafe fn _mm256_cvtneps_pbh(a: __m256) -> __m128bh { /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657,1660&avx512techs=AVX512_BF16&text=_mm256_mask_cvtneps_pbh) #[inline] #[target_feature(enable = "avx512bf16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vcvtneps2bf16"))] pub unsafe fn _mm256_mask_cvtneps_pbh(src: __m128bh, k: __mmask8, a: __m256) -> __m128bh { let cvt = _mm256_cvtneps_pbh(a).as_u16x8(); @@ -180,6 +191,7 @@ pub unsafe fn _mm256_mask_cvtneps_pbh(src: __m128bh, k: __mmask8, a: __m256) -> /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657,1660&avx512techs=AVX512_BF16&text=_mm256_maskz_cvtneps_pbh) #[inline] #[target_feature(enable = "avx512bf16,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vcvtneps2bf16"))] pub unsafe fn _mm256_maskz_cvtneps_pbh(k: __mmask8, a: __m256) -> __m128bh { let cvt = _mm256_cvtneps_pbh(a).as_u16x8(); @@ -192,6 +204,7 @@ pub unsafe fn _mm256_maskz_cvtneps_pbh(k: __mmask8, a: __m256) -> __m128bh { /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657,1660&avx512techs=AVX512_BF16&text=_mm512_cvtneps_pbh) #[inline] #[target_feature(enable = "avx512bf16,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vcvtneps2bf16"))] pub unsafe fn _mm512_cvtneps_pbh(a: __m512) -> __m256bh { transmute(cvtneps2bf16_512(a.as_f32x16())) @@ -203,6 +216,7 @@ pub unsafe fn _mm512_cvtneps_pbh(a: __m512) -> __m256bh { /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657,1660&avx512techs=AVX512_BF16&text=_mm512_mask_cvtneps_pbh) #[inline] #[target_feature(enable = "avx512bf16,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vcvtneps2bf16"))] pub unsafe fn _mm512_mask_cvtneps_pbh(src: __m256bh, k: __mmask16, a: __m512) -> __m256bh { let cvt = _mm512_cvtneps_pbh(a).as_u16x16(); @@ -215,6 +229,7 @@ pub unsafe fn _mm512_mask_cvtneps_pbh(src: __m256bh, k: __mmask16, a: __m512) -> /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657,1660&avx512techs=AVX512_BF16&text=_mm512_maskz_cvtneps_pbh) #[inline] #[target_feature(enable = "avx512bf16,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vcvtneps2bf16"))] pub unsafe fn _mm512_maskz_cvtneps_pbh(k: __mmask16, a: __m512) -> __m256bh { let cvt = _mm512_cvtneps_pbh(a).as_u16x16(); @@ -228,6 +243,7 @@ pub unsafe fn _mm512_maskz_cvtneps_pbh(k: __mmask16, a: __m512) -> __m256bh { /// [Intel's 
documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657,1660&avx512techs=AVX512_BF16&text=_mm_dpbf16_ps) #[inline] #[target_feature(enable = "avx512bf16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vdpbf16ps"))] pub unsafe fn _mm_dpbf16_ps(src: __m128, a: __m128bh, b: __m128bh) -> __m128 { transmute(dpbf16ps(src.as_f32x4(), a.as_i32x4(), b.as_i32x4())) @@ -240,6 +256,7 @@ pub unsafe fn _mm_dpbf16_ps(src: __m128, a: __m128bh, b: __m128bh) -> __m128 { /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657,1660&avx512techs=AVX512_BF16&text=_mm_mask_dpbf16_ps) #[inline] #[target_feature(enable = "avx512bf16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vdpbf16ps"))] pub unsafe fn _mm_mask_dpbf16_ps(src: __m128, k: __mmask8, a: __m128bh, b: __m128bh) -> __m128 { let rst = _mm_dpbf16_ps(src, a, b).as_f32x4(); @@ -253,6 +270,7 @@ pub unsafe fn _mm_mask_dpbf16_ps(src: __m128, k: __mmask8, a: __m128bh, b: __m12 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657,1660&avx512techs=AVX512_BF16&text=_mm_maskz_dpbf16_ps) #[inline] #[target_feature(enable = "avx512bf16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vdpbf16ps"))] pub unsafe fn _mm_maskz_dpbf16_ps(k: __mmask8, src: __m128, a: __m128bh, b: __m128bh) -> __m128 { let rst = _mm_dpbf16_ps(src, a, b).as_f32x4(); @@ -266,6 +284,7 @@ pub unsafe fn _mm_maskz_dpbf16_ps(k: __mmask8, src: __m128, a: __m128bh, b: __m1 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657,1660&avx512techs=AVX512_BF16&text=_mm256_dpbf16_ps) #[inline] #[target_feature(enable = "avx512bf16,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vdpbf16ps"))] pub unsafe fn _mm256_dpbf16_ps(src: __m256, a: __m256bh, b: __m256bh) -> __m256 { transmute(dpbf16ps_256(src.as_f32x8(), a.as_i32x8(), b.as_i32x8())) @@ -278,6 +297,7 @@ pub unsafe fn _mm256_dpbf16_ps(src: __m256, a: __m256bh, b: __m256bh) -> __m256 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657,1660&avx512techs=AVX512_BF16&text=_mm256_mask_dpbf16_ps) #[inline] #[target_feature(enable = "avx512bf16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vdpbf16ps"))] pub unsafe fn _mm256_mask_dpbf16_ps(src: __m256, k: __mmask8, a: __m256bh, b: __m256bh) -> __m256 { let rst = _mm256_dpbf16_ps(src, a, b).as_f32x8(); @@ -291,6 +311,7 @@ pub unsafe fn _mm256_mask_dpbf16_ps(src: __m256, k: __mmask8, a: __m256bh, b: __ /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657,1660&avx512techs=AVX512_BF16&text=_mm256_maskz_dpbf16_ps) #[inline] #[target_feature(enable = "avx512bf16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vdpbf16ps"))] pub unsafe fn _mm256_maskz_dpbf16_ps(k: __mmask8, src: __m256, a: __m256bh, b: __m256bh) -> __m256 { let rst = _mm256_dpbf16_ps(src, a, b).as_f32x8(); @@ -306,6 +327,7 @@ pub unsafe fn _mm256_maskz_dpbf16_ps(k: __mmask8, src: __m256, a: __m256bh, b: _ /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657,1660&avx512techs=AVX512_BF16&text=_mm512_dpbf16_ps) #[inline] #[target_feature(enable = "avx512bf16,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vdpbf16ps"))] pub unsafe fn _mm512_dpbf16_ps(src: __m512, a: __m512bh, b: __m512bh) -> __m512 { transmute(dpbf16ps_512(src.as_f32x16(), a.as_i32x16(), 
b.as_i32x16())) @@ -318,6 +340,7 @@ pub unsafe fn _mm512_dpbf16_ps(src: __m512, a: __m512bh, b: __m512bh) -> __m512 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657,1660&avx512techs=AVX512_BF16&text=_mm512_mask_dpbf16_ps) #[inline] #[target_feature(enable = "avx512bf16,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vdpbf16ps"))] pub unsafe fn _mm512_mask_dpbf16_ps(src: __m512, k: __mmask16, a: __m512bh, b: __m512bh) -> __m512 { let rst = _mm512_dpbf16_ps(src, a, b).as_f32x16(); @@ -331,6 +354,7 @@ pub unsafe fn _mm512_mask_dpbf16_ps(src: __m512, k: __mmask16, a: __m512bh, b: _ /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654,1657,1660&avx512techs=AVX512_BF16&text=_mm512_maskz_dpbf16_ps) #[inline] #[target_feature(enable = "avx512bf16,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr("vdpbf16ps"))] pub unsafe fn _mm512_maskz_dpbf16_ps( k: __mmask16, diff --git a/crates/core_arch/src/x86/avx512bitalg.rs b/crates/core_arch/src/x86/avx512bitalg.rs index ce4e402a8c..48494b2e45 100644 --- a/crates/core_arch/src/x86/avx512bitalg.rs +++ b/crates/core_arch/src/x86/avx512bitalg.rs @@ -61,6 +61,7 @@ extern "C" { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_popcnt_epi16) #[inline] #[target_feature(enable = "avx512bitalg")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntw))] pub unsafe fn _mm512_popcnt_epi16(a: __m512i) -> __m512i { transmute(popcnt_v32i16(a.as_i16x32())) @@ -74,6 +75,7 @@ pub unsafe fn _mm512_popcnt_epi16(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_popcnt_epi16) #[inline] #[target_feature(enable = 
"avx512bitalg")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntw))] pub unsafe fn _mm512_maskz_popcnt_epi16(k: __mmask32, a: __m512i) -> __m512i { let zero = _mm512_setzero_si512().as_i16x32(); @@ -88,6 +90,7 @@ pub unsafe fn _mm512_maskz_popcnt_epi16(k: __mmask32, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_popcnt_epi16) #[inline] #[target_feature(enable = "avx512bitalg")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntw))] pub unsafe fn _mm512_mask_popcnt_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m512i { transmute(simd_select_bitmask( @@ -102,6 +105,7 @@ pub unsafe fn _mm512_mask_popcnt_epi16(src: __m512i, k: __mmask32, a: __m512i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_popcnt_epi16) #[inline] #[target_feature(enable = "avx512bitalg,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntw))] pub unsafe fn _mm256_popcnt_epi16(a: __m256i) -> __m256i { transmute(popcnt_v16i16(a.as_i16x16())) @@ -115,6 +119,7 @@ pub unsafe fn _mm256_popcnt_epi16(a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_popcnt_epi16) #[inline] #[target_feature(enable = "avx512bitalg,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntw))] pub unsafe fn _mm256_maskz_popcnt_epi16(k: __mmask16, a: __m256i) -> __m256i { let zero = _mm256_setzero_si256().as_i16x16(); @@ -129,6 +134,7 @@ pub unsafe fn _mm256_maskz_popcnt_epi16(k: __mmask16, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_popcnt_epi16) #[inline] 
#[target_feature(enable = "avx512bitalg,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntw))] pub unsafe fn _mm256_mask_popcnt_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i { transmute(simd_select_bitmask( @@ -143,6 +149,7 @@ pub unsafe fn _mm256_mask_popcnt_epi16(src: __m256i, k: __mmask16, a: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_epi16) #[inline] #[target_feature(enable = "avx512bitalg,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntw))] pub unsafe fn _mm_popcnt_epi16(a: __m128i) -> __m128i { transmute(popcnt_v8i16(a.as_i16x8())) @@ -156,6 +163,7 @@ pub unsafe fn _mm_popcnt_epi16(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_popcnt_epi16) #[inline] #[target_feature(enable = "avx512bitalg,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntw))] pub unsafe fn _mm_maskz_popcnt_epi16(k: __mmask8, a: __m128i) -> __m128i { let zero = _mm_setzero_si128().as_i16x8(); @@ -170,6 +178,7 @@ pub unsafe fn _mm_maskz_popcnt_epi16(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_popcnt_epi16) #[inline] #[target_feature(enable = "avx512bitalg,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntw))] pub unsafe fn _mm_mask_popcnt_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(simd_select_bitmask( @@ -184,6 +193,7 @@ pub unsafe fn _mm_mask_popcnt_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_popcnt_epi8) 
#[inline] #[target_feature(enable = "avx512bitalg")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntb))] pub unsafe fn _mm512_popcnt_epi8(a: __m512i) -> __m512i { transmute(popcnt_v64i8(a.as_i8x64())) @@ -197,6 +207,7 @@ pub unsafe fn _mm512_popcnt_epi8(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_popcnt_epi8) #[inline] #[target_feature(enable = "avx512bitalg")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntb))] pub unsafe fn _mm512_maskz_popcnt_epi8(k: __mmask64, a: __m512i) -> __m512i { let zero = _mm512_setzero_si512().as_i8x64(); @@ -211,6 +222,7 @@ pub unsafe fn _mm512_maskz_popcnt_epi8(k: __mmask64, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_popcnt_epi8) #[inline] #[target_feature(enable = "avx512bitalg")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntb))] pub unsafe fn _mm512_mask_popcnt_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __m512i { transmute(simd_select_bitmask( @@ -225,6 +237,7 @@ pub unsafe fn _mm512_mask_popcnt_epi8(src: __m512i, k: __mmask64, a: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_popcnt_epi8) #[inline] #[target_feature(enable = "avx512bitalg,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntb))] pub unsafe fn _mm256_popcnt_epi8(a: __m256i) -> __m256i { transmute(popcnt_v32i8(a.as_i8x32())) @@ -238,6 +251,7 @@ pub unsafe fn _mm256_popcnt_epi8(a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_popcnt_epi8) #[inline] #[target_feature(enable = 
"avx512bitalg,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntb))] pub unsafe fn _mm256_maskz_popcnt_epi8(k: __mmask32, a: __m256i) -> __m256i { let zero = _mm256_setzero_si256().as_i8x32(); @@ -252,6 +266,7 @@ pub unsafe fn _mm256_maskz_popcnt_epi8(k: __mmask32, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_popcnt_epi8) #[inline] #[target_feature(enable = "avx512bitalg,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntb))] pub unsafe fn _mm256_mask_popcnt_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __m256i { transmute(simd_select_bitmask( @@ -266,6 +281,7 @@ pub unsafe fn _mm256_mask_popcnt_epi8(src: __m256i, k: __mmask32, a: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_epi8) #[inline] #[target_feature(enable = "avx512bitalg,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntb))] pub unsafe fn _mm_popcnt_epi8(a: __m128i) -> __m128i { transmute(popcnt_v16i8(a.as_i8x16())) @@ -279,6 +295,7 @@ pub unsafe fn _mm_popcnt_epi8(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_popcnt_epi8) #[inline] #[target_feature(enable = "avx512bitalg,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntb))] pub unsafe fn _mm_maskz_popcnt_epi8(k: __mmask16, a: __m128i) -> __m128i { let zero = _mm_setzero_si128().as_i8x16(); @@ -293,6 +310,7 @@ pub unsafe fn _mm_maskz_popcnt_epi8(k: __mmask16, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_popcnt_epi8) #[inline] 
#[target_feature(enable = "avx512bitalg,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntb))] pub unsafe fn _mm_mask_popcnt_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m128i { transmute(simd_select_bitmask( @@ -309,6 +327,7 @@ pub unsafe fn _mm_mask_popcnt_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_bitshuffle_epi64_mask) #[inline] #[target_feature(enable = "avx512bitalg")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufbitqmb))] pub unsafe fn _mm512_bitshuffle_epi64_mask(b: __m512i, c: __m512i) -> __mmask64 { bitshuffle_512(b.as_i8x64(), c.as_i8x64(), !0) @@ -324,6 +343,7 @@ pub unsafe fn _mm512_bitshuffle_epi64_mask(b: __m512i, c: __m512i) -> __mmask64 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_bitshuffle_epi64_mask) #[inline] #[target_feature(enable = "avx512bitalg")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufbitqmb))] pub unsafe fn _mm512_mask_bitshuffle_epi64_mask(k: __mmask64, b: __m512i, c: __m512i) -> __mmask64 { bitshuffle_512(b.as_i8x64(), c.as_i8x64(), k) @@ -336,6 +356,7 @@ pub unsafe fn _mm512_mask_bitshuffle_epi64_mask(k: __mmask64, b: __m512i, c: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_bitshuffle_epi64_mask) #[inline] #[target_feature(enable = "avx512bitalg,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufbitqmb))] pub unsafe fn _mm256_bitshuffle_epi64_mask(b: __m256i, c: __m256i) -> __mmask32 { bitshuffle_256(b.as_i8x32(), c.as_i8x32(), !0) @@ -351,6 +372,7 @@ pub unsafe fn _mm256_bitshuffle_epi64_mask(b: __m256i, c: __m256i) -> __mmask32 /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_bitshuffle_epi64_mask) #[inline] #[target_feature(enable = "avx512bitalg,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufbitqmb))] pub unsafe fn _mm256_mask_bitshuffle_epi64_mask(k: __mmask32, b: __m256i, c: __m256i) -> __mmask32 { bitshuffle_256(b.as_i8x32(), c.as_i8x32(), k) @@ -363,6 +385,7 @@ pub unsafe fn _mm256_mask_bitshuffle_epi64_mask(k: __mmask32, b: __m256i, c: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_bitshuffle_epi64_mask) #[inline] #[target_feature(enable = "avx512bitalg,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufbitqmb))] pub unsafe fn _mm_bitshuffle_epi64_mask(b: __m128i, c: __m128i) -> __mmask16 { bitshuffle_128(b.as_i8x16(), c.as_i8x16(), !0) @@ -378,6 +401,7 @@ pub unsafe fn _mm_bitshuffle_epi64_mask(b: __m128i, c: __m128i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_bitshuffle_epi64_mask) #[inline] #[target_feature(enable = "avx512bitalg,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufbitqmb))] pub unsafe fn _mm_mask_bitshuffle_epi64_mask(k: __mmask16, b: __m128i, c: __m128i) -> __mmask16 { bitshuffle_128(b.as_i8x16(), c.as_i8x16(), k) diff --git a/crates/core_arch/src/x86/avx512bw.rs b/crates/core_arch/src/x86/avx512bw.rs index 0b4a56d365..a60bd3aecc 100644 --- a/crates/core_arch/src/x86/avx512bw.rs +++ b/crates/core_arch/src/x86/avx512bw.rs @@ -15,6 +15,7 @@ use super::avx512f::{vpl, vps}; /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_abs_epi16&expand=30) #[inline] #[target_feature(enable = "avx512bw")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsw))] pub unsafe fn _mm512_abs_epi16(a: __m512i) -> __m512i { let a = a.as_i16x32(); @@ -30,6 +31,7 @@ pub unsafe fn _mm512_abs_epi16(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_abs_epi16&expand=31) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsw))] pub unsafe fn _mm512_mask_abs_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m512i { let abs = _mm512_abs_epi16(a).as_i16x32(); @@ -41,6 +43,7 @@ pub unsafe fn _mm512_mask_abs_epi16(src: __m512i, k: __mmask32, a: __m512i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_abs_epi16&expand=32) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsw))] pub unsafe fn _mm512_maskz_abs_epi16(k: __mmask32, a: __m512i) -> __m512i { let abs = _mm512_abs_epi16(a).as_i16x32(); @@ -53,6 +56,7 @@ pub unsafe fn _mm512_maskz_abs_epi16(k: __mmask32, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_abs_epi16&expand=28) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsw))] pub unsafe fn _mm256_mask_abs_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i { let abs = _mm256_abs_epi16(a).as_i16x16(); @@ -64,6 +68,7 @@ pub unsafe fn _mm256_mask_abs_epi16(src: __m256i, k: __mmask16, a: __m256i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_abs_epi16&expand=29) #[inline] #[target_feature(enable = 
"avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsw))] pub unsafe fn _mm256_maskz_abs_epi16(k: __mmask16, a: __m256i) -> __m256i { let abs = _mm256_abs_epi16(a).as_i16x16(); @@ -76,6 +81,7 @@ pub unsafe fn _mm256_maskz_abs_epi16(k: __mmask16, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_abs_epi16&expand=25) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsw))] pub unsafe fn _mm_mask_abs_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let abs = _mm_abs_epi16(a).as_i16x8(); @@ -87,6 +93,7 @@ pub unsafe fn _mm_mask_abs_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_abs_epi16&expand=26) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsw))] pub unsafe fn _mm_maskz_abs_epi16(k: __mmask8, a: __m128i) -> __m128i { let abs = _mm_abs_epi16(a).as_i16x8(); @@ -99,6 +106,7 @@ pub unsafe fn _mm_maskz_abs_epi16(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_abs_epi8&expand=57) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsb))] pub unsafe fn _mm512_abs_epi8(a: __m512i) -> __m512i { let a = a.as_i8x64(); @@ -114,6 +122,7 @@ pub unsafe fn _mm512_abs_epi8(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_abs_epi8&expand=58) #[inline] #[target_feature(enable = "avx512bw")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsb))] pub unsafe fn _mm512_mask_abs_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __m512i { let abs = _mm512_abs_epi8(a).as_i8x64(); @@ -125,6 +134,7 @@ pub unsafe fn _mm512_mask_abs_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_abs_epi8&expand=59) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsb))] pub unsafe fn _mm512_maskz_abs_epi8(k: __mmask64, a: __m512i) -> __m512i { let abs = _mm512_abs_epi8(a).as_i8x64(); @@ -137,6 +147,7 @@ pub unsafe fn _mm512_maskz_abs_epi8(k: __mmask64, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_abs_epi8&expand=55) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsb))] pub unsafe fn _mm256_mask_abs_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __m256i { let abs = _mm256_abs_epi8(a).as_i8x32(); @@ -148,6 +159,7 @@ pub unsafe fn _mm256_mask_abs_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_abs_epi8&expand=56) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsb))] pub unsafe fn _mm256_maskz_abs_epi8(k: __mmask32, a: __m256i) -> __m256i { let abs = _mm256_abs_epi8(a).as_i8x32(); @@ -160,6 +172,7 @@ pub unsafe fn _mm256_maskz_abs_epi8(k: __mmask32, a: __m256i) -> __m256i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_abs_epi8&expand=52) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsb))] pub unsafe fn _mm_mask_abs_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m128i { let abs = _mm_abs_epi8(a).as_i8x16(); @@ -171,6 +184,7 @@ pub unsafe fn _mm_mask_abs_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_abs_epi8&expand=53) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsb))] pub unsafe fn _mm_maskz_abs_epi8(k: __mmask16, a: __m128i) -> __m128i { let abs = _mm_abs_epi8(a).as_i8x16(); @@ -183,6 +197,7 @@ pub unsafe fn _mm_maskz_abs_epi8(k: __mmask16, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_epi16&expand=91) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddw))] pub unsafe fn _mm512_add_epi16(a: __m512i, b: __m512i) -> __m512i { transmute(simd_add(a.as_i16x32(), b.as_i16x32())) @@ -193,6 +208,7 @@ pub unsafe fn _mm512_add_epi16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_epi16&expand=92) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddw))] pub unsafe fn _mm512_mask_add_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let add = _mm512_add_epi16(a, b).as_i16x32(); @@ -204,6 +220,7 @@ pub unsafe fn _mm512_mask_add_epi16(src: 
__m512i, k: __mmask32, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_epi16&expand=93) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddw))] pub unsafe fn _mm512_maskz_add_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let add = _mm512_add_epi16(a, b).as_i16x32(); @@ -216,6 +233,7 @@ pub unsafe fn _mm512_maskz_add_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_add_epi&expand=89) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddw))] pub unsafe fn _mm256_mask_add_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let add = _mm256_add_epi16(a, b).as_i16x16(); @@ -227,6 +245,7 @@ pub unsafe fn _mm256_mask_add_epi16(src: __m256i, k: __mmask16, a: __m256i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_add_epi16&expand=90) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddw))] pub unsafe fn _mm256_maskz_add_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let add = _mm256_add_epi16(a, b).as_i16x16(); @@ -239,6 +258,7 @@ pub unsafe fn _mm256_maskz_add_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_add_epi16&expand=86) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddw))] pub unsafe fn _mm_mask_add_epi16(src: __m128i, k: 
__mmask8, a: __m128i, b: __m128i) -> __m128i { let add = _mm_add_epi16(a, b).as_i16x8(); @@ -250,6 +270,7 @@ pub unsafe fn _mm_mask_add_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_epi16&expand=87) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddw))] pub unsafe fn _mm_maskz_add_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let add = _mm_add_epi16(a, b).as_i16x8(); @@ -262,6 +283,7 @@ pub unsafe fn _mm_maskz_add_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_epi8&expand=118) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddb))] pub unsafe fn _mm512_add_epi8(a: __m512i, b: __m512i) -> __m512i { transmute(simd_add(a.as_i8x64(), b.as_i8x64())) @@ -272,6 +294,7 @@ pub unsafe fn _mm512_add_epi8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_epi8&expand=119) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddb))] pub unsafe fn _mm512_mask_add_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let add = _mm512_add_epi8(a, b).as_i8x64(); @@ -283,6 +306,7 @@ pub unsafe fn _mm512_mask_add_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_epi8&expand=120) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vpaddb))] pub unsafe fn _mm512_maskz_add_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let add = _mm512_add_epi8(a, b).as_i8x64(); @@ -295,6 +319,7 @@ pub unsafe fn _mm512_maskz_add_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_add_epi8&expand=116) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddb))] pub unsafe fn _mm256_mask_add_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let add = _mm256_add_epi8(a, b).as_i8x32(); @@ -306,6 +331,7 @@ pub unsafe fn _mm256_mask_add_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_add_epi8&expand=117) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddb))] pub unsafe fn _mm256_maskz_add_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let add = _mm256_add_epi8(a, b).as_i8x32(); @@ -318,6 +344,7 @@ pub unsafe fn _mm256_maskz_add_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_add_epi8&expand=113) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddb))] pub unsafe fn _mm_mask_add_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let add = _mm_add_epi8(a, b).as_i8x16(); @@ -329,6 +356,7 @@ pub unsafe fn _mm_mask_add_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m12 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_epi8&expand=114) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddb))] pub unsafe fn _mm_maskz_add_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let add = _mm_add_epi8(a, b).as_i8x16(); @@ -341,6 +369,7 @@ pub unsafe fn _mm_maskz_add_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_adds_epu16&expand=197) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddusw))] pub unsafe fn _mm512_adds_epu16(a: __m512i, b: __m512i) -> __m512i { transmute(vpaddusw( @@ -356,6 +385,7 @@ pub unsafe fn _mm512_adds_epu16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_adds_epu16&expand=198) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddusw))] pub unsafe fn _mm512_mask_adds_epu16( src: __m512i, @@ -371,6 +401,7 @@ pub unsafe fn _mm512_mask_adds_epu16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_adds_epu16&expand=199) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddusw))] pub unsafe fn _mm512_maskz_adds_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { transmute(vpaddusw( @@ -386,6 +417,7 @@ pub unsafe fn _mm512_maskz_adds_epu16(k: __mmask32, a: __m512i, b: __m512i) -> _ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_adds_epu16&expand=195) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddusw))] pub unsafe fn _mm256_mask_adds_epu16( src: __m256i, @@ -406,6 +438,7 @@ pub unsafe fn _mm256_mask_adds_epu16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_adds_epu16&expand=196) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddusw))] pub unsafe fn _mm256_maskz_adds_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { transmute(vpaddusw256( @@ -421,6 +454,7 @@ pub unsafe fn _mm256_maskz_adds_epu16(k: __mmask16, a: __m256i, b: __m256i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_adds_epu16&expand=192) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddusw))] pub unsafe fn _mm_mask_adds_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { transmute(vpaddusw128(a.as_u16x8(), b.as_u16x8(), src.as_u16x8(), k)) @@ -431,6 +465,7 @@ pub unsafe fn _mm_mask_adds_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_adds_epu16&expand=193) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddusw))] pub unsafe fn _mm_maskz_adds_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { transmute(vpaddusw128( @@ -446,6 +481,7 @@ pub unsafe fn _mm_maskz_adds_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m12 
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_adds_epu8&expand=206) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddusb))] pub unsafe fn _mm512_adds_epu8(a: __m512i, b: __m512i) -> __m512i { transmute(vpaddusb( @@ -461,6 +497,7 @@ pub unsafe fn _mm512_adds_epu8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_adds_epu8&expand=207) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddusb))] pub unsafe fn _mm512_mask_adds_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { transmute(vpaddusb(a.as_u8x64(), b.as_u8x64(), src.as_u8x64(), k)) @@ -471,6 +508,7 @@ pub unsafe fn _mm512_mask_adds_epu8(src: __m512i, k: __mmask64, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_adds_epu8&expand=208) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddusb))] pub unsafe fn _mm512_maskz_adds_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { transmute(vpaddusb( @@ -486,6 +524,7 @@ pub unsafe fn _mm512_maskz_adds_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_adds_epu8&expand=204) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddusb))] pub unsafe fn _mm256_mask_adds_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { transmute(vpaddusb256(a.as_u8x32(), b.as_u8x32(), 
src.as_u8x32(), k)) @@ -496,6 +535,7 @@ pub unsafe fn _mm256_mask_adds_epu8(src: __m256i, k: __mmask32, a: __m256i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_adds_epu8&expand=205) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddusb))] pub unsafe fn _mm256_maskz_adds_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { transmute(vpaddusb256( @@ -511,6 +551,7 @@ pub unsafe fn _mm256_maskz_adds_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_adds_epu8&expand=201) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddusb))] pub unsafe fn _mm_mask_adds_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { transmute(vpaddusb128(a.as_u8x16(), b.as_u8x16(), src.as_u8x16(), k)) @@ -521,6 +562,7 @@ pub unsafe fn _mm_mask_adds_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_adds_epu8&expand=202) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddusb))] pub unsafe fn _mm_maskz_adds_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { transmute(vpaddusb128( @@ -536,6 +578,7 @@ pub unsafe fn _mm_maskz_adds_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_adds_epi16&expand=179) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpaddsw))] pub unsafe fn _mm512_adds_epi16(a: __m512i, b: __m512i) -> __m512i { transmute(vpaddsw( @@ -551,6 +594,7 @@ pub unsafe fn _mm512_adds_epi16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_adds_epi16&expand=180) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddsw))] pub unsafe fn _mm512_mask_adds_epi16( src: __m512i, @@ -566,6 +610,7 @@ pub unsafe fn _mm512_mask_adds_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_adds_epi16&expand=181) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddsw))] pub unsafe fn _mm512_maskz_adds_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { transmute(vpaddsw( @@ -581,6 +626,7 @@ pub unsafe fn _mm512_maskz_adds_epi16(k: __mmask32, a: __m512i, b: __m512i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_adds_epi16&expand=177) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddsw))] pub unsafe fn _mm256_mask_adds_epi16( src: __m256i, @@ -596,6 +642,7 @@ pub unsafe fn _mm256_mask_adds_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_adds_epi16&expand=178) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddsw))] pub unsafe fn _mm256_maskz_adds_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { transmute(vpaddsw256( @@ -611,6 +658,7 @@ pub unsafe fn _mm256_maskz_adds_epi16(k: 
__mmask16, a: __m256i, b: __m256i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_adds_epi16&expand=174) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddsw))] pub unsafe fn _mm_mask_adds_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { transmute(vpaddsw128(a.as_i16x8(), b.as_i16x8(), src.as_i16x8(), k)) @@ -621,6 +669,7 @@ pub unsafe fn _mm_mask_adds_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_adds_epi16&expand=175) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddsw))] pub unsafe fn _mm_maskz_adds_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { transmute(vpaddsw128( @@ -636,6 +685,7 @@ pub unsafe fn _mm_maskz_adds_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_adds_epi8&expand=188) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddsb))] pub unsafe fn _mm512_adds_epi8(a: __m512i, b: __m512i) -> __m512i { transmute(vpaddsb( @@ -651,6 +701,7 @@ pub unsafe fn _mm512_adds_epi8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_adds_epi8&expand=189) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddsb))] pub unsafe fn _mm512_mask_adds_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { 
transmute(vpaddsb(a.as_i8x64(), b.as_i8x64(), src.as_i8x64(), k)) @@ -661,6 +712,7 @@ pub unsafe fn _mm512_mask_adds_epi8(src: __m512i, k: __mmask64, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_adds_epi8&expand=190) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddsb))] pub unsafe fn _mm512_maskz_adds_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { transmute(vpaddsb( @@ -676,6 +728,7 @@ pub unsafe fn _mm512_maskz_adds_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_adds_epi8&expand=186) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddsb))] pub unsafe fn _mm256_mask_adds_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { transmute(vpaddsb256(a.as_i8x32(), b.as_i8x32(), src.as_i8x32(), k)) @@ -686,6 +739,7 @@ pub unsafe fn _mm256_mask_adds_epi8(src: __m256i, k: __mmask32, a: __m256i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_adds_epi8&expand=187) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddsb))] pub unsafe fn _mm256_maskz_adds_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { transmute(vpaddsb256( @@ -701,6 +755,7 @@ pub unsafe fn _mm256_maskz_adds_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_adds_epi8&expand=183) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vpaddsb))] pub unsafe fn _mm_mask_adds_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { transmute(vpaddsb128(a.as_i8x16(), b.as_i8x16(), src.as_i8x16(), k)) @@ -711,6 +766,7 @@ pub unsafe fn _mm_mask_adds_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_adds_epi8&expand=184) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddsb))] pub unsafe fn _mm_maskz_adds_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { transmute(vpaddsb128( @@ -726,6 +782,7 @@ pub unsafe fn _mm_maskz_adds_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_epi16&expand=5685) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubw))] pub unsafe fn _mm512_sub_epi16(a: __m512i, b: __m512i) -> __m512i { transmute(simd_sub(a.as_i16x32(), b.as_i16x32())) @@ -736,6 +793,7 @@ pub unsafe fn _mm512_sub_epi16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_epi16&expand=5683) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubw))] pub unsafe fn _mm512_mask_sub_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let sub = _mm512_sub_epi16(a, b).as_i16x32(); @@ -747,6 +805,7 @@ pub unsafe fn _mm512_mask_sub_epi16(src: __m512i, k: __mmask32, a: __m512i, b: _ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_epi16&expand=5684) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubw))] pub unsafe fn _mm512_maskz_sub_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let sub = _mm512_sub_epi16(a, b).as_i16x32(); @@ -759,6 +818,7 @@ pub unsafe fn _mm512_maskz_sub_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sub_epi16&expand=5680) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubw))] pub unsafe fn _mm256_mask_sub_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let sub = _mm256_sub_epi16(a, b).as_i16x16(); @@ -770,6 +830,7 @@ pub unsafe fn _mm256_mask_sub_epi16(src: __m256i, k: __mmask16, a: __m256i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sub_epi16&expand=5681) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubw))] pub unsafe fn _mm256_maskz_sub_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let sub = _mm256_sub_epi16(a, b).as_i16x16(); @@ -782,6 +843,7 @@ pub unsafe fn _mm256_maskz_sub_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sub_epi16&expand=5677) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubw))] pub unsafe fn _mm_mask_sub_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { 
let sub = _mm_sub_epi16(a, b).as_i16x8(); @@ -793,6 +855,7 @@ pub unsafe fn _mm_mask_sub_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sub_epi16&expand=5678) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubw))] pub unsafe fn _mm_maskz_sub_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let sub = _mm_sub_epi16(a, b).as_i16x8(); @@ -805,6 +868,7 @@ pub unsafe fn _mm_maskz_sub_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_epi8&expand=5712) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubb))] pub unsafe fn _mm512_sub_epi8(a: __m512i, b: __m512i) -> __m512i { transmute(simd_sub(a.as_i8x64(), b.as_i8x64())) @@ -815,6 +879,7 @@ pub unsafe fn _mm512_sub_epi8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_epi8&expand=5710) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubb))] pub unsafe fn _mm512_mask_sub_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let sub = _mm512_sub_epi8(a, b).as_i8x64(); @@ -826,6 +891,7 @@ pub unsafe fn _mm512_mask_sub_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_epi8&expand=5711) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubb))] pub unsafe 
fn _mm512_maskz_sub_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let sub = _mm512_sub_epi8(a, b).as_i8x64(); @@ -838,6 +904,7 @@ pub unsafe fn _mm512_maskz_sub_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sub_epi8&expand=5707) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubb))] pub unsafe fn _mm256_mask_sub_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let sub = _mm256_sub_epi8(a, b).as_i8x32(); @@ -849,6 +916,7 @@ pub unsafe fn _mm256_mask_sub_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sub_epi8&expand=5708) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubb))] pub unsafe fn _mm256_maskz_sub_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let sub = _mm256_sub_epi8(a, b).as_i8x32(); @@ -861,6 +929,7 @@ pub unsafe fn _mm256_maskz_sub_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sub_epi8&expand=5704) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubb))] pub unsafe fn _mm_mask_sub_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let sub = _mm_sub_epi8(a, b).as_i8x16(); @@ -872,6 +941,7 @@ pub unsafe fn _mm_mask_sub_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sub_epi8&expand=5705) #[inline] 
#[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubb))] pub unsafe fn _mm_maskz_sub_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let sub = _mm_sub_epi8(a, b).as_i8x16(); @@ -884,6 +954,7 @@ pub unsafe fn _mm_maskz_sub_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_subs_epu16&expand=5793) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubusw))] pub unsafe fn _mm512_subs_epu16(a: __m512i, b: __m512i) -> __m512i { transmute(vpsubusw( @@ -899,6 +970,7 @@ pub unsafe fn _mm512_subs_epu16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_subs_epu16&expand=5791) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubusw))] pub unsafe fn _mm512_mask_subs_epu16( src: __m512i, @@ -914,6 +986,7 @@ pub unsafe fn _mm512_mask_subs_epu16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_subs_epu16&expand=5792) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubusw))] pub unsafe fn _mm512_maskz_subs_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { transmute(vpsubusw( @@ -929,6 +1002,7 @@ pub unsafe fn _mm512_maskz_subs_epu16(k: __mmask32, a: __m512i, b: __m512i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_subs_epu16&expand=5788) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubusw))] pub unsafe fn _mm256_mask_subs_epu16( src: __m256i, @@ -949,6 +1023,7 @@ pub unsafe fn _mm256_mask_subs_epu16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_subs_epu16&expand=5789) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubusw))] pub unsafe fn _mm256_maskz_subs_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { transmute(vpsubusw256( @@ -964,6 +1039,7 @@ pub unsafe fn _mm256_maskz_subs_epu16(k: __mmask16, a: __m256i, b: __m256i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_subs_epu16&expand=5785) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubusw))] pub unsafe fn _mm_mask_subs_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { transmute(vpsubusw128(a.as_u16x8(), b.as_u16x8(), src.as_u16x8(), k)) @@ -974,6 +1050,7 @@ pub unsafe fn _mm_mask_subs_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_subs_epu16&expand=5786) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubusw))] pub unsafe fn _mm_maskz_subs_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { transmute(vpsubusw128( @@ -989,6 +1066,7 @@ pub unsafe fn _mm_maskz_subs_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_subs_epu8&expand=5802) #[inline] #[target_feature(enable = "avx512bw")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubusb))] pub unsafe fn _mm512_subs_epu8(a: __m512i, b: __m512i) -> __m512i { transmute(vpsubusb( @@ -1004,6 +1082,7 @@ pub unsafe fn _mm512_subs_epu8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_subs_epu8&expand=5800) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubusb))] pub unsafe fn _mm512_mask_subs_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { transmute(vpsubusb(a.as_u8x64(), b.as_u8x64(), src.as_u8x64(), k)) @@ -1014,6 +1093,7 @@ pub unsafe fn _mm512_mask_subs_epu8(src: __m512i, k: __mmask64, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_subs_epu8&expand=5801) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubusb))] pub unsafe fn _mm512_maskz_subs_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { transmute(vpsubusb( @@ -1029,6 +1109,7 @@ pub unsafe fn _mm512_maskz_subs_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_subs_epu8&expand=5797) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubusb))] pub unsafe fn _mm256_mask_subs_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { transmute(vpsubusb256(a.as_u8x32(), b.as_u8x32(), src.as_u8x32(), k)) @@ -1039,6 +1120,7 @@ pub unsafe fn _mm256_mask_subs_epu8(src: __m256i, k: __mmask32, a: __m256i, b: _ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_subs_epu8&expand=5798) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubusb))] pub unsafe fn _mm256_maskz_subs_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { transmute(vpsubusb256( @@ -1054,6 +1136,7 @@ pub unsafe fn _mm256_maskz_subs_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_subs_epu8&expand=5794) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubusb))] pub unsafe fn _mm_mask_subs_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { transmute(vpsubusb128(a.as_u8x16(), b.as_u8x16(), src.as_u8x16(), k)) @@ -1064,6 +1147,7 @@ pub unsafe fn _mm_mask_subs_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_subs_epu8&expand=5795) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubusb))] pub unsafe fn _mm_maskz_subs_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { transmute(vpsubusb128( @@ -1079,6 +1163,7 @@ pub unsafe fn _mm_maskz_subs_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_subs_epi16&expand=5775) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubsw))] pub unsafe fn _mm512_subs_epi16(a: __m512i, b: __m512i) -> __m512i { transmute(vpsubsw( @@ -1094,6 +1179,7 @@ pub 
unsafe fn _mm512_subs_epi16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_subs_epi16&expand=5773) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubsw))] pub unsafe fn _mm512_mask_subs_epi16( src: __m512i, @@ -1109,6 +1195,7 @@ pub unsafe fn _mm512_mask_subs_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_subs_epi16&expand=5774) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubsw))] pub unsafe fn _mm512_maskz_subs_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { transmute(vpsubsw( @@ -1124,6 +1211,7 @@ pub unsafe fn _mm512_maskz_subs_epi16(k: __mmask32, a: __m512i, b: __m512i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_subs_epi16&expand=5770) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubsw))] pub unsafe fn _mm256_mask_subs_epi16( src: __m256i, @@ -1139,6 +1227,7 @@ pub unsafe fn _mm256_mask_subs_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_subs_epi16&expand=5771) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubsw))] pub unsafe fn _mm256_maskz_subs_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { transmute(vpsubsw256( @@ -1154,6 +1243,7 @@ pub unsafe fn _mm256_maskz_subs_epi16(k: __mmask16, a: __m256i, b: __m256i) -> _ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_subs_epi16&expand=5767) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubsw))] pub unsafe fn _mm_mask_subs_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { transmute(vpsubsw128(a.as_i16x8(), b.as_i16x8(), src.as_i16x8(), k)) @@ -1164,6 +1254,7 @@ pub unsafe fn _mm_mask_subs_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_subs_epi16&expand=5768) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubsw))] pub unsafe fn _mm_maskz_subs_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { transmute(vpsubsw128( @@ -1179,6 +1270,7 @@ pub unsafe fn _mm_maskz_subs_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_subs_epi8&expand=5784) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubsb))] pub unsafe fn _mm512_subs_epi8(a: __m512i, b: __m512i) -> __m512i { transmute(vpsubsb( @@ -1194,6 +1286,7 @@ pub unsafe fn _mm512_subs_epi8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_subs_epi8&expand=5782) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubsb))] pub unsafe fn _mm512_mask_subs_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { transmute(vpsubsb(a.as_i8x64(), b.as_i8x64(), src.as_i8x64(), k)) @@ 
-1204,6 +1297,7 @@ pub unsafe fn _mm512_mask_subs_epi8(src: __m512i, k: __mmask64, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_subs_epi8&expand=5783) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubsb))] pub unsafe fn _mm512_maskz_subs_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { transmute(vpsubsb( @@ -1219,6 +1313,7 @@ pub unsafe fn _mm512_maskz_subs_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_subs_epi8&expand=5779) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubsb))] pub unsafe fn _mm256_mask_subs_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { transmute(vpsubsb256(a.as_i8x32(), b.as_i8x32(), src.as_i8x32(), k)) @@ -1229,6 +1324,7 @@ pub unsafe fn _mm256_mask_subs_epi8(src: __m256i, k: __mmask32, a: __m256i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_subs_epi8&expand=5780) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubsb))] pub unsafe fn _mm256_maskz_subs_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { transmute(vpsubsb256( @@ -1244,6 +1340,7 @@ pub unsafe fn _mm256_maskz_subs_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_subs_epi8&expand=5776) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubsb))] 
pub unsafe fn _mm_mask_subs_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { transmute(vpsubsb128(a.as_i8x16(), b.as_i8x16(), src.as_i8x16(), k)) @@ -1254,6 +1351,7 @@ pub unsafe fn _mm_mask_subs_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_subs_epi8&expand=5777) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubsb))] pub unsafe fn _mm_maskz_subs_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { transmute(vpsubsb128( @@ -1269,6 +1367,7 @@ pub unsafe fn _mm_maskz_subs_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mulhi_epu16&expand=3973) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhuw))] pub unsafe fn _mm512_mulhi_epu16(a: __m512i, b: __m512i) -> __m512i { transmute(vpmulhuw(a.as_u16x32(), b.as_u16x32())) @@ -1279,6 +1378,7 @@ pub unsafe fn _mm512_mulhi_epu16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mulhi_epu16&expand=3971) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhuw))] pub unsafe fn _mm512_mask_mulhi_epu16( src: __m512i, @@ -1295,6 +1395,7 @@ pub unsafe fn _mm512_mask_mulhi_epu16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mulhi_epu16&expand=3972) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhuw))] pub 
unsafe fn _mm512_maskz_mulhi_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let mul = _mm512_mulhi_epu16(a, b).as_u16x32(); @@ -1307,6 +1408,7 @@ pub unsafe fn _mm512_maskz_mulhi_epu16(k: __mmask32, a: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mulhi_epu16&expand=3968) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhuw))] pub unsafe fn _mm256_mask_mulhi_epu16( src: __m256i, @@ -1323,6 +1425,7 @@ pub unsafe fn _mm256_mask_mulhi_epu16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mulhi_epu16&expand=3969) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhuw))] pub unsafe fn _mm256_maskz_mulhi_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let mul = _mm256_mulhi_epu16(a, b).as_u16x16(); @@ -1335,6 +1438,7 @@ pub unsafe fn _mm256_maskz_mulhi_epu16(k: __mmask16, a: __m256i, b: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mulhi_epu16&expand=3965) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhuw))] pub unsafe fn _mm_mask_mulhi_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let mul = _mm_mulhi_epu16(a, b).as_u16x8(); @@ -1346,6 +1450,7 @@ pub unsafe fn _mm_mask_mulhi_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mulhi_epu16&expand=3966) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhuw))] pub unsafe fn _mm_maskz_mulhi_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let mul = _mm_mulhi_epu16(a, b).as_u16x8(); @@ -1358,6 +1463,7 @@ pub unsafe fn _mm_maskz_mulhi_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mulhi_epi16&expand=3962) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhw))] pub unsafe fn _mm512_mulhi_epi16(a: __m512i, b: __m512i) -> __m512i { transmute(vpmulhw(a.as_i16x32(), b.as_i16x32())) @@ -1368,6 +1474,7 @@ pub unsafe fn _mm512_mulhi_epi16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mulhi_epi16&expand=3960) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhw))] pub unsafe fn _mm512_mask_mulhi_epi16( src: __m512i, @@ -1384,6 +1491,7 @@ pub unsafe fn _mm512_mask_mulhi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mulhi_epi16&expand=3961) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhw))] pub unsafe fn _mm512_maskz_mulhi_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let mul = _mm512_mulhi_epi16(a, b).as_i16x32(); @@ -1396,6 +1504,7 @@ pub unsafe fn _mm512_maskz_mulhi_epi16(k: __mmask32, a: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mulhi_epi16&expand=3957) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = 
"111137")] #[cfg_attr(test, assert_instr(vpmulhw))] pub unsafe fn _mm256_mask_mulhi_epi16( src: __m256i, @@ -1412,6 +1521,7 @@ pub unsafe fn _mm256_mask_mulhi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mulhi_epi16&expand=3958) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhw))] pub unsafe fn _mm256_maskz_mulhi_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let mul = _mm256_mulhi_epi16(a, b).as_i16x16(); @@ -1424,6 +1534,7 @@ pub unsafe fn _mm256_maskz_mulhi_epi16(k: __mmask16, a: __m256i, b: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mulhi_epi16&expand=3954) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhw))] pub unsafe fn _mm_mask_mulhi_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let mul = _mm_mulhi_epi16(a, b).as_i16x8(); @@ -1435,6 +1546,7 @@ pub unsafe fn _mm_mask_mulhi_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mulhi_epi16&expand=3955) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhw))] pub unsafe fn _mm_maskz_mulhi_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let mul = _mm_mulhi_epi16(a, b).as_i16x8(); @@ -1447,6 +1559,7 @@ pub unsafe fn _mm_maskz_mulhi_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mulhrs_epi16&expand=3986) #[inline] #[target_feature(enable = "avx512bw")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhrsw))] pub unsafe fn _mm512_mulhrs_epi16(a: __m512i, b: __m512i) -> __m512i { transmute(vpmulhrsw(a.as_i16x32(), b.as_i16x32())) @@ -1457,6 +1570,7 @@ pub unsafe fn _mm512_mulhrs_epi16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mulhrs_epi16&expand=3984) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhrsw))] pub unsafe fn _mm512_mask_mulhrs_epi16( src: __m512i, @@ -1473,6 +1587,7 @@ pub unsafe fn _mm512_mask_mulhrs_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mulhrs_epi16&expand=3985) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhrsw))] pub unsafe fn _mm512_maskz_mulhrs_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let mul = _mm512_mulhrs_epi16(a, b).as_i16x32(); @@ -1485,6 +1600,7 @@ pub unsafe fn _mm512_maskz_mulhrs_epi16(k: __mmask32, a: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mulhrs_epi16&expand=3981) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhrsw))] pub unsafe fn _mm256_mask_mulhrs_epi16( src: __m256i, @@ -1501,6 +1617,7 @@ pub unsafe fn _mm256_mask_mulhrs_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mulhrs_epi16&expand=3982) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpmulhrsw))] pub unsafe fn _mm256_maskz_mulhrs_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let mul = _mm256_mulhrs_epi16(a, b).as_i16x16(); @@ -1513,6 +1630,7 @@ pub unsafe fn _mm256_maskz_mulhrs_epi16(k: __mmask16, a: __m256i, b: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mulhrs_epi16&expand=3978) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhrsw))] pub unsafe fn _mm_mask_mulhrs_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let mul = _mm_mulhrs_epi16(a, b).as_i16x8(); @@ -1524,6 +1642,7 @@ pub unsafe fn _mm_mask_mulhrs_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mulhrs_epi16&expand=3979) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulhrsw))] pub unsafe fn _mm_maskz_mulhrs_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let mul = _mm_mulhrs_epi16(a, b).as_i16x8(); @@ -1536,6 +1655,7 @@ pub unsafe fn _mm_maskz_mulhrs_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mullo_epi16&expand=3996) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmullw))] pub unsafe fn _mm512_mullo_epi16(a: __m512i, b: __m512i) -> __m512i { transmute(simd_mul(a.as_i16x32(), b.as_i16x32())) @@ -1546,6 +1666,7 @@ pub unsafe fn _mm512_mullo_epi16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mullo_epi16&expand=3994) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmullw))] pub unsafe fn _mm512_mask_mullo_epi16( src: __m512i, @@ -1562,6 +1683,7 @@ pub unsafe fn _mm512_mask_mullo_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mullo_epi16&expand=3995) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmullw))] pub unsafe fn _mm512_maskz_mullo_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let mul = _mm512_mullo_epi16(a, b).as_i16x32(); @@ -1574,6 +1696,7 @@ pub unsafe fn _mm512_maskz_mullo_epi16(k: __mmask32, a: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mullo_epi16&expand=3991) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmullw))] pub unsafe fn _mm256_mask_mullo_epi16( src: __m256i, @@ -1590,6 +1713,7 @@ pub unsafe fn _mm256_mask_mullo_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mullo_epi16&expand=3992) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmullw))] pub unsafe fn _mm256_maskz_mullo_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let mul = _mm256_mullo_epi16(a, b).as_i16x16(); @@ -1602,6 +1726,7 @@ pub unsafe fn _mm256_maskz_mullo_epi16(k: __mmask16, a: __m256i, b: __m256i) -> /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mullo_epi16&expand=3988) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmullw))] pub unsafe fn _mm_mask_mullo_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let mul = _mm_mullo_epi16(a, b).as_i16x8(); @@ -1613,6 +1738,7 @@ pub unsafe fn _mm_mask_mullo_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mullo_epi16&expand=3989) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmullw))] pub unsafe fn _mm_maskz_mullo_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let mul = _mm_mullo_epi16(a, b).as_i16x8(); @@ -1625,6 +1751,7 @@ pub unsafe fn _mm_maskz_mullo_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epu16&expand=3609) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxuw))] pub unsafe fn _mm512_max_epu16(a: __m512i, b: __m512i) -> __m512i { transmute(vpmaxuw(a.as_u16x32(), b.as_u16x32())) @@ -1635,6 +1762,7 @@ pub unsafe fn _mm512_max_epu16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epu16&expand=3607) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxuw))] pub unsafe fn _mm512_mask_max_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let max = _mm512_max_epu16(a, 
b).as_u16x32(); @@ -1646,6 +1774,7 @@ pub unsafe fn _mm512_mask_max_epu16(src: __m512i, k: __mmask32, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epu16&expand=3608) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxuw))] pub unsafe fn _mm512_maskz_max_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let max = _mm512_max_epu16(a, b).as_u16x32(); @@ -1658,6 +1787,7 @@ pub unsafe fn _mm512_maskz_max_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epu16&expand=3604) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxuw))] pub unsafe fn _mm256_mask_max_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let max = _mm256_max_epu16(a, b).as_u16x16(); @@ -1669,6 +1799,7 @@ pub unsafe fn _mm256_mask_max_epu16(src: __m256i, k: __mmask16, a: __m256i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epu16&expand=3605) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxuw))] pub unsafe fn _mm256_maskz_max_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let max = _mm256_max_epu16(a, b).as_u16x16(); @@ -1681,6 +1812,7 @@ pub unsafe fn _mm256_maskz_max_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epu16&expand=3601) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = 
"111137")] #[cfg_attr(test, assert_instr(vpmaxuw))] pub unsafe fn _mm_mask_max_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let max = _mm_max_epu16(a, b).as_u16x8(); @@ -1692,6 +1824,7 @@ pub unsafe fn _mm_mask_max_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epu16&expand=3602) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxuw))] pub unsafe fn _mm_maskz_max_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let max = _mm_max_epu16(a, b).as_u16x8(); @@ -1704,6 +1837,7 @@ pub unsafe fn _mm_maskz_max_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epu8&expand=3636) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxub))] pub unsafe fn _mm512_max_epu8(a: __m512i, b: __m512i) -> __m512i { transmute(vpmaxub(a.as_u8x64(), b.as_u8x64())) @@ -1714,6 +1848,7 @@ pub unsafe fn _mm512_max_epu8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epu8&expand=3634) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxub))] pub unsafe fn _mm512_mask_max_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let max = _mm512_max_epu8(a, b).as_u8x64(); @@ -1725,6 +1860,7 @@ pub unsafe fn _mm512_mask_max_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epu8&expand=3635) 
#[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxub))] pub unsafe fn _mm512_maskz_max_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let max = _mm512_max_epu8(a, b).as_u8x64(); @@ -1737,6 +1873,7 @@ pub unsafe fn _mm512_maskz_max_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epu8&expand=3631) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxub))] pub unsafe fn _mm256_mask_max_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let max = _mm256_max_epu8(a, b).as_u8x32(); @@ -1748,6 +1885,7 @@ pub unsafe fn _mm256_mask_max_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epu8&expand=3632) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxub))] pub unsafe fn _mm256_maskz_max_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let max = _mm256_max_epu8(a, b).as_u8x32(); @@ -1760,6 +1898,7 @@ pub unsafe fn _mm256_maskz_max_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epu8&expand=3628) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxub))] pub unsafe fn _mm_mask_max_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let max = _mm_max_epu8(a, b).as_u8x16(); @@ -1771,6 +1910,7 @@ pub unsafe fn _mm_mask_max_epu8(src: __m128i, k: __mmask16, a: 
__m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epu8&expand=3629) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxub))] pub unsafe fn _mm_maskz_max_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let max = _mm_max_epu8(a, b).as_u8x16(); @@ -1783,6 +1923,7 @@ pub unsafe fn _mm_maskz_max_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epi16&expand=3573) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsw))] pub unsafe fn _mm512_max_epi16(a: __m512i, b: __m512i) -> __m512i { transmute(vpmaxsw(a.as_i16x32(), b.as_i16x32())) @@ -1793,6 +1934,7 @@ pub unsafe fn _mm512_max_epi16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epi16&expand=3571) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsw))] pub unsafe fn _mm512_mask_max_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let max = _mm512_max_epi16(a, b).as_i16x32(); @@ -1804,6 +1946,7 @@ pub unsafe fn _mm512_mask_max_epi16(src: __m512i, k: __mmask32, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epi16&expand=3572) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsw))] pub unsafe fn _mm512_maskz_max_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let max = _mm512_max_epi16(a, 
b).as_i16x32(); @@ -1816,6 +1959,7 @@ pub unsafe fn _mm512_maskz_max_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epi16&expand=3568) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsw))] pub unsafe fn _mm256_mask_max_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let max = _mm256_max_epi16(a, b).as_i16x16(); @@ -1827,6 +1971,7 @@ pub unsafe fn _mm256_mask_max_epi16(src: __m256i, k: __mmask16, a: __m256i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epi16&expand=3569) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsw))] pub unsafe fn _mm256_maskz_max_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let max = _mm256_max_epi16(a, b).as_i16x16(); @@ -1839,6 +1984,7 @@ pub unsafe fn _mm256_maskz_max_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epi16&expand=3565) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsw))] pub unsafe fn _mm_mask_max_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let max = _mm_max_epi16(a, b).as_i16x8(); @@ -1850,6 +1996,7 @@ pub unsafe fn _mm_mask_max_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epi16&expand=3566) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsw))] pub unsafe fn _mm_maskz_max_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let max = _mm_max_epi16(a, b).as_i16x8(); @@ -1862,6 +2009,7 @@ pub unsafe fn _mm_maskz_max_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epi8&expand=3600) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsb))] pub unsafe fn _mm512_max_epi8(a: __m512i, b: __m512i) -> __m512i { transmute(vpmaxsb(a.as_i8x64(), b.as_i8x64())) @@ -1872,6 +2020,7 @@ pub unsafe fn _mm512_max_epi8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epi8&expand=3598) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsb))] pub unsafe fn _mm512_mask_max_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let max = _mm512_max_epi8(a, b).as_i8x64(); @@ -1883,6 +2032,7 @@ pub unsafe fn _mm512_mask_max_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epi8&expand=3599) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsb))] pub unsafe fn _mm512_maskz_max_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let max = _mm512_max_epi8(a, b).as_i8x64(); @@ -1895,6 +2045,7 @@ pub unsafe fn _mm512_maskz_max_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epi8&expand=3595) #[inline] 
#[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsb))] pub unsafe fn _mm256_mask_max_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let max = _mm256_max_epi8(a, b).as_i8x32(); @@ -1906,6 +2057,7 @@ pub unsafe fn _mm256_mask_max_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epi8&expand=3596) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsb))] pub unsafe fn _mm256_maskz_max_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let max = _mm256_max_epi8(a, b).as_i8x32(); @@ -1918,6 +2070,7 @@ pub unsafe fn _mm256_maskz_max_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epi8&expand=3592) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsb))] pub unsafe fn _mm_mask_max_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let max = _mm_max_epi8(a, b).as_i8x16(); @@ -1929,6 +2082,7 @@ pub unsafe fn _mm_mask_max_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epi8&expand=3593) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsb))] pub unsafe fn _mm_maskz_max_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let max = _mm_max_epi8(a, b).as_i8x16(); @@ -1941,6 +2095,7 @@ pub unsafe fn _mm_maskz_max_epi8(k: __mmask16, a: __m128i, b: __m128i) -> 
__m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_epu16&expand=3723) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminuw))] pub unsafe fn _mm512_min_epu16(a: __m512i, b: __m512i) -> __m512i { transmute(vpminuw(a.as_u16x32(), b.as_u16x32())) @@ -1951,6 +2106,7 @@ pub unsafe fn _mm512_min_epu16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_epu16&expand=3721) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminuw))] pub unsafe fn _mm512_mask_min_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let min = _mm512_min_epu16(a, b).as_u16x32(); @@ -1962,6 +2118,7 @@ pub unsafe fn _mm512_mask_min_epu16(src: __m512i, k: __mmask32, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_epu16&expand=3722) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminuw))] pub unsafe fn _mm512_maskz_min_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let min = _mm512_min_epu16(a, b).as_u16x32(); @@ -1974,6 +2131,7 @@ pub unsafe fn _mm512_maskz_min_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_epu16&expand=3718) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminuw))] pub unsafe fn _mm256_mask_min_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let min = 
_mm256_min_epu16(a, b).as_u16x16(); @@ -1985,6 +2143,7 @@ pub unsafe fn _mm256_mask_min_epu16(src: __m256i, k: __mmask16, a: __m256i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_epu16&expand=3719) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminuw))] pub unsafe fn _mm256_maskz_min_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let min = _mm256_min_epu16(a, b).as_u16x16(); @@ -1997,6 +2156,7 @@ pub unsafe fn _mm256_maskz_min_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_epu16&expand=3715) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminuw))] pub unsafe fn _mm_mask_min_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let min = _mm_min_epu16(a, b).as_u16x8(); @@ -2008,6 +2168,7 @@ pub unsafe fn _mm_mask_min_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_epu16&expand=3716) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminuw))] pub unsafe fn _mm_maskz_min_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let min = _mm_min_epu16(a, b).as_u16x8(); @@ -2020,6 +2181,7 @@ pub unsafe fn _mm_maskz_min_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_epu8&expand=3750) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vpminub))] pub unsafe fn _mm512_min_epu8(a: __m512i, b: __m512i) -> __m512i { transmute(vpminub(a.as_u8x64(), b.as_u8x64())) @@ -2030,6 +2192,7 @@ pub unsafe fn _mm512_min_epu8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_epu8&expand=3748) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminub))] pub unsafe fn _mm512_mask_min_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let min = _mm512_min_epu8(a, b).as_u8x64(); @@ -2041,6 +2204,7 @@ pub unsafe fn _mm512_mask_min_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_epu8&expand=3749) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminub))] pub unsafe fn _mm512_maskz_min_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let min = _mm512_min_epu8(a, b).as_u8x64(); @@ -2053,6 +2217,7 @@ pub unsafe fn _mm512_maskz_min_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_epu8&expand=3745) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminub))] pub unsafe fn _mm256_mask_min_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let min = _mm256_min_epu8(a, b).as_u8x32(); @@ -2064,6 +2229,7 @@ pub unsafe fn _mm256_mask_min_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_epu8&expand=3746) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminub))] pub unsafe fn _mm256_maskz_min_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let min = _mm256_min_epu8(a, b).as_u8x32(); @@ -2076,6 +2242,7 @@ pub unsafe fn _mm256_maskz_min_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_epu8&expand=3742) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminub))] pub unsafe fn _mm_mask_min_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let min = _mm_min_epu8(a, b).as_u8x16(); @@ -2087,6 +2254,7 @@ pub unsafe fn _mm_mask_min_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_epu8&expand=3743) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminub))] pub unsafe fn _mm_maskz_min_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let min = _mm_min_epu8(a, b).as_u8x16(); @@ -2099,6 +2267,7 @@ pub unsafe fn _mm_maskz_min_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_epi16&expand=3687) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsw))] pub unsafe fn _mm512_min_epi16(a: __m512i, b: __m512i) -> __m512i { transmute(vpminsw(a.as_i16x32(), b.as_i16x32())) 
@@ -2109,6 +2278,7 @@ pub unsafe fn _mm512_min_epi16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_epi16&expand=3685) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsw))] pub unsafe fn _mm512_mask_min_epi16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let min = _mm512_min_epi16(a, b).as_i16x32(); @@ -2120,6 +2290,7 @@ pub unsafe fn _mm512_mask_min_epi16(src: __m512i, k: __mmask32, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_epi16&expand=3686) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsw))] pub unsafe fn _mm512_maskz_min_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let min = _mm512_min_epi16(a, b).as_i16x32(); @@ -2132,6 +2303,7 @@ pub unsafe fn _mm512_maskz_min_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_epi16&expand=3682) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsw))] pub unsafe fn _mm256_mask_min_epi16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let min = _mm256_min_epi16(a, b).as_i16x16(); @@ -2143,6 +2315,7 @@ pub unsafe fn _mm256_mask_min_epi16(src: __m256i, k: __mmask16, a: __m256i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_epi16&expand=3683) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpminsw))] pub unsafe fn _mm256_maskz_min_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let min = _mm256_min_epi16(a, b).as_i16x16(); @@ -2155,6 +2328,7 @@ pub unsafe fn _mm256_maskz_min_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_epi16&expand=3679) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsw))] pub unsafe fn _mm_mask_min_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let min = _mm_min_epi16(a, b).as_i16x8(); @@ -2166,6 +2340,7 @@ pub unsafe fn _mm_mask_min_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_epi16&expand=3680) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsw))] pub unsafe fn _mm_maskz_min_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let min = _mm_min_epi16(a, b).as_i16x8(); @@ -2178,6 +2353,7 @@ pub unsafe fn _mm_maskz_min_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_epi8&expand=3714) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsb))] pub unsafe fn _mm512_min_epi8(a: __m512i, b: __m512i) -> __m512i { transmute(vpminsb(a.as_i8x64(), b.as_i8x64())) @@ -2188,6 +2364,7 @@ pub unsafe fn _mm512_min_epi8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_epi8&expand=3712) #[inline] #[target_feature(enable = 
"avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsb))] pub unsafe fn _mm512_mask_min_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let min = _mm512_min_epi8(a, b).as_i8x64(); @@ -2199,6 +2376,7 @@ pub unsafe fn _mm512_mask_min_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_epi8&expand=3713) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsb))] pub unsafe fn _mm512_maskz_min_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let min = _mm512_min_epi8(a, b).as_i8x64(); @@ -2211,6 +2389,7 @@ pub unsafe fn _mm512_maskz_min_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_epi8&expand=3709) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsb))] pub unsafe fn _mm256_mask_min_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let min = _mm256_min_epi8(a, b).as_i8x32(); @@ -2222,6 +2401,7 @@ pub unsafe fn _mm256_mask_min_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_epi8&expand=3710) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsb))] pub unsafe fn _mm256_maskz_min_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let min = _mm256_min_epi8(a, b).as_i8x32(); @@ -2234,6 +2414,7 @@ pub unsafe fn _mm256_maskz_min_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_epi8&expand=3706) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsb))] pub unsafe fn _mm_mask_min_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let min = _mm_min_epi8(a, b).as_i8x16(); @@ -2245,6 +2426,7 @@ pub unsafe fn _mm_mask_min_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_epi8&expand=3707) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsb))] pub unsafe fn _mm_maskz_min_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let min = _mm_min_epi8(a, b).as_i8x16(); @@ -2257,6 +2439,7 @@ pub unsafe fn _mm_maskz_min_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_cmplt_epu16_mask&expand=1050) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmplt_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { simd_bitmask::(simd_lt(a.as_u16x32(), b.as_u16x32())) @@ -2267,6 +2450,7 @@ pub unsafe fn _mm512_cmplt_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_epu16_mask&expand=1051) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmplt_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_cmplt_epu16_mask(a, 
b) & k1 @@ -2277,6 +2461,7 @@ pub unsafe fn _mm512_mask_cmplt_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_cmplt_epu16_mask&expand=1050) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmplt_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { simd_bitmask::(simd_lt(a.as_u16x16(), b.as_u16x16())) @@ -2287,6 +2472,7 @@ pub unsafe fn _mm256_cmplt_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmplt_epu16_mask&expand=1049) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmplt_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_cmplt_epu16_mask(a, b) & k1 @@ -2297,6 +2483,7 @@ pub unsafe fn _mm256_mask_cmplt_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi16_mask&expand=1018) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmplt_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_lt(a.as_u16x8(), b.as_u16x8())) @@ -2307,6 +2494,7 @@ pub unsafe fn _mm_cmplt_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmplt_epi16_mask&expand=1019) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmplt_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmplt_epu16_mask(a, b) & k1 @@ -2317,6 +2505,7 @@ pub unsafe fn _mm_mask_cmplt_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm512_cmplt_epu8_mask&expand=1068) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmplt_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { simd_bitmask::(simd_lt(a.as_u8x64(), b.as_u8x64())) @@ -2327,6 +2516,7 @@ pub unsafe fn _mm512_cmplt_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_epu8_mask&expand=1069) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmplt_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_cmplt_epu8_mask(a, b) & k1 @@ -2337,6 +2527,7 @@ pub unsafe fn _mm512_mask_cmplt_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmplt_epu8_mask&expand=1066) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmplt_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { simd_bitmask::(simd_lt(a.as_u8x32(), b.as_u8x32())) @@ -2347,6 +2538,7 @@ pub unsafe fn _mm256_cmplt_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmplt_epu8_mask&expand=1067) #[inline] 
#[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmplt_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_cmplt_epu8_mask(a, b) & k1 @@ -2357,6 +2549,7 @@ pub unsafe fn _mm256_mask_cmplt_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epu8_mask&expand=1064) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmplt_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { simd_bitmask::(simd_lt(a.as_u8x16(), b.as_u8x16())) @@ -2367,6 +2560,7 @@ pub unsafe fn _mm_cmplt_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmplt_epu8_mask&expand=1065) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmplt_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_cmplt_epu8_mask(a, b) & k1 @@ -2377,6 +2571,7 @@ pub unsafe fn _mm_mask_cmplt_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_epi16_mask&expand=1022) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmplt_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { simd_bitmask::(simd_lt(a.as_i16x32(), b.as_i16x32())) @@ -2387,6 +2582,7 @@ pub unsafe fn _mm512_cmplt_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_epi16_mask&expand=1023) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmplt_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_cmplt_epi16_mask(a, b) & k1 @@ -2397,6 +2593,7 @@ pub unsafe fn _mm512_mask_cmplt_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmplt_epi16_mask&expand=1020) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmplt_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { simd_bitmask::(simd_lt(a.as_i16x16(), b.as_i16x16())) @@ -2407,6 +2604,7 @@ pub unsafe fn _mm256_cmplt_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmplt_epi16_mask&expand=1021) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmplt_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_cmplt_epi16_mask(a, b) & k1 @@ -2417,6 +2615,7 @@ pub unsafe fn _mm256_mask_cmplt_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi16_mask&expand=1018) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmplt_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { 
simd_bitmask::(simd_lt(a.as_i16x8(), b.as_i16x8())) @@ -2427,6 +2626,7 @@ pub unsafe fn _mm_cmplt_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmplt_epi16_mask&expand=1019) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmplt_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmplt_epi16_mask(a, b) & k1 @@ -2437,6 +2637,7 @@ pub unsafe fn _mm_mask_cmplt_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_epi8_mask&expand=1044) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmplt_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { simd_bitmask::(simd_lt(a.as_i8x64(), b.as_i8x64())) @@ -2447,6 +2648,7 @@ pub unsafe fn _mm512_cmplt_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_epi8_mask&expand=1045) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmplt_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_cmplt_epi8_mask(a, b) & k1 @@ -2457,6 +2659,7 @@ pub unsafe fn _mm512_mask_cmplt_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmplt_epi8_mask&expand=1042) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmplt_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { simd_bitmask::(simd_lt(a.as_i8x32(), b.as_i8x32())) @@ -2467,6 +2670,7 @@ pub unsafe fn _mm256_cmplt_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmplt_epi8_mask&expand=1043) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmplt_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_cmplt_epi8_mask(a, b) & k1 @@ -2477,6 +2681,7 @@ pub unsafe fn _mm256_mask_cmplt_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi8_mask&expand=1040) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmplt_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { simd_bitmask::(simd_lt(a.as_i8x16(), b.as_i8x16())) @@ -2487,6 +2692,7 @@ pub unsafe fn _mm_cmplt_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmplt_epi8_mask&expand=1041) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmplt_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_cmplt_epi8_mask(a, b) & k1 @@ -2497,6 +2703,7 @@ pub unsafe fn _mm_mask_cmplt_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpgt_epu16_mask&expand=927) 
#[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmpgt_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { simd_bitmask::(simd_gt(a.as_u16x32(), b.as_u16x32())) @@ -2507,6 +2714,7 @@ pub unsafe fn _mm512_cmpgt_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpgt_epu16_mask&expand=928) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmpgt_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_cmpgt_epu16_mask(a, b) & k1 @@ -2517,6 +2725,7 @@ pub unsafe fn _mm512_mask_cmpgt_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epu16_mask&expand=925) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmpgt_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { simd_bitmask::(simd_gt(a.as_u16x16(), b.as_u16x16())) @@ -2527,6 +2736,7 @@ pub unsafe fn _mm256_cmpgt_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpgt_epu16_mask&expand=926) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmpgt_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_cmpgt_epu16_mask(a, b) & k1 @@ -2537,6 +2747,7 @@ pub unsafe fn _mm256_mask_cmpgt_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epu16_mask&expand=923) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmpgt_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_gt(a.as_u16x8(), b.as_u16x8())) @@ -2547,6 +2758,7 @@ pub unsafe fn _mm_cmpgt_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpgt_epu16_mask&expand=924) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmpgt_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpgt_epu16_mask(a, b) & k1 @@ -2557,6 +2769,7 @@ pub unsafe fn _mm_mask_cmpgt_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpgt_epu8_mask&expand=945) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmpgt_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { simd_bitmask::(simd_gt(a.as_u8x64(), b.as_u8x64())) @@ -2567,6 +2780,7 @@ pub unsafe fn _mm512_cmpgt_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpgt_epu8_mask&expand=946) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmpgt_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_cmpgt_epu8_mask(a, b) & k1 @@ 
-2577,6 +2791,7 @@ pub unsafe fn _mm512_mask_cmpgt_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epu8_mask&expand=943) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmpgt_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { simd_bitmask::(simd_gt(a.as_u8x32(), b.as_u8x32())) @@ -2587,6 +2802,7 @@ pub unsafe fn _mm256_cmpgt_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpgt_epu8_mask&expand=944) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmpgt_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_cmpgt_epu8_mask(a, b) & k1 @@ -2597,6 +2813,7 @@ pub unsafe fn _mm256_mask_cmpgt_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epu8_mask&expand=941) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmpgt_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { simd_bitmask::(simd_gt(a.as_u8x16(), b.as_u8x16())) @@ -2607,6 +2824,7 @@ pub unsafe fn _mm_cmpgt_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpgt_epu8_mask&expand=942) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe 
fn _mm_mask_cmpgt_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_cmpgt_epu8_mask(a, b) & k1 @@ -2617,6 +2835,7 @@ pub unsafe fn _mm_mask_cmpgt_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpgt_epi16_mask&expand=897) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmpgt_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { simd_bitmask::(simd_gt(a.as_i16x32(), b.as_i16x32())) @@ -2627,6 +2846,7 @@ pub unsafe fn _mm512_cmpgt_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpgt_epi16_mask&expand=898) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmpgt_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_cmpgt_epi16_mask(a, b) & k1 @@ -2637,6 +2857,7 @@ pub unsafe fn _mm512_mask_cmpgt_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epi16_mask&expand=895) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmpgt_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { simd_bitmask::(simd_gt(a.as_i16x16(), b.as_i16x16())) @@ -2647,6 +2868,7 @@ pub unsafe fn _mm256_cmpgt_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpgt_epi16_mask&expand=896) #[inline] #[target_feature(enable = 
"avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmpgt_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_cmpgt_epi16_mask(a, b) & k1 @@ -2657,6 +2879,7 @@ pub unsafe fn _mm256_mask_cmpgt_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi16_mask&expand=893) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmpgt_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_gt(a.as_i16x8(), b.as_i16x8())) @@ -2667,6 +2890,7 @@ pub unsafe fn _mm_cmpgt_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpgt_epi16_mask&expand=894) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmpgt_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpgt_epi16_mask(a, b) & k1 @@ -2677,6 +2901,7 @@ pub unsafe fn _mm_mask_cmpgt_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpgt_epi8_mask&expand=921) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmpgt_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { simd_bitmask::(simd_gt(a.as_i8x64(), b.as_i8x64())) @@ -2687,6 +2912,7 @@ pub unsafe fn _mm512_cmpgt_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpgt_epi8_mask&expand=922) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmpgt_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_cmpgt_epi8_mask(a, b) & k1 @@ -2697,6 +2923,7 @@ pub unsafe fn _mm512_mask_cmpgt_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epi8_mask&expand=919) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmpgt_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { simd_bitmask::(simd_gt(a.as_i8x32(), b.as_i8x32())) @@ -2707,6 +2934,7 @@ pub unsafe fn _mm256_cmpgt_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpgt_epi8_mask&expand=920) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmpgt_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_cmpgt_epi8_mask(a, b) & k1 @@ -2717,6 +2945,7 @@ pub unsafe fn _mm256_mask_cmpgt_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi8_mask&expand=917) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmpgt_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { simd_bitmask::(simd_gt(a.as_i8x16(), 
b.as_i8x16())) @@ -2727,6 +2956,7 @@ pub unsafe fn _mm_cmpgt_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpgt_epi8_mask&expand=918) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmpgt_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_cmpgt_epi8_mask(a, b) & k1 @@ -2737,6 +2967,7 @@ pub unsafe fn _mm_mask_cmpgt_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_epu16_mask&expand=989) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmple_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { simd_bitmask::(simd_le(a.as_u16x32(), b.as_u16x32())) @@ -2747,6 +2978,7 @@ pub unsafe fn _mm512_cmple_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_epu16_mask&expand=990) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmple_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_cmple_epu16_mask(a, b) & k1 @@ -2757,6 +2989,7 @@ pub unsafe fn _mm512_mask_cmple_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmple_epu16_mask&expand=987) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] 
pub unsafe fn _mm256_cmple_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { simd_bitmask::(simd_le(a.as_u16x16(), b.as_u16x16())) @@ -2767,6 +3000,7 @@ pub unsafe fn _mm256_cmple_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmple_epu16_mask&expand=988) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmple_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_cmple_epu16_mask(a, b) & k1 @@ -2777,6 +3011,7 @@ pub unsafe fn _mm256_mask_cmple_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_epu16_mask&expand=985) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmple_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_le(a.as_u16x8(), b.as_u16x8())) @@ -2787,6 +3022,7 @@ pub unsafe fn _mm_cmple_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmple_epu16_mask&expand=986) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmple_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmple_epu16_mask(a, b) & k1 @@ -2797,6 +3033,7 @@ pub unsafe fn _mm_mask_cmple_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_epu8_mask&expand=1007) #[inline] #[target_feature(enable = 
"avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmple_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { simd_bitmask::(simd_le(a.as_u8x64(), b.as_u8x64())) @@ -2807,6 +3044,7 @@ pub unsafe fn _mm512_cmple_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_epu8_mask&expand=1008) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmple_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_cmple_epu8_mask(a, b) & k1 @@ -2817,6 +3055,7 @@ pub unsafe fn _mm512_mask_cmple_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmple_epu8_mask&expand=1005) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmple_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { simd_bitmask::(simd_le(a.as_u8x32(), b.as_u8x32())) @@ -2827,6 +3066,7 @@ pub unsafe fn _mm256_cmple_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmple_epu8_mask&expand=1006) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmple_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_cmple_epu8_mask(a, b) & k1 @@ -2837,6 +3077,7 @@ pub unsafe fn _mm256_mask_cmple_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_epu8_mask&expand=1003) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmple_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { simd_bitmask::(simd_le(a.as_u8x16(), b.as_u8x16())) @@ -2847,6 +3088,7 @@ pub unsafe fn _mm_cmple_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmple_epu8_mask&expand=1004) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmple_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_cmple_epu8_mask(a, b) & k1 @@ -2857,6 +3099,7 @@ pub unsafe fn _mm_mask_cmple_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_epi16_mask&expand=965) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmple_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { simd_bitmask::(simd_le(a.as_i16x32(), b.as_i16x32())) @@ -2867,6 +3110,7 @@ pub unsafe fn _mm512_cmple_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_epi16_mask&expand=966) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmple_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_cmple_epi16_mask(a, b) & k1 @@ 
-2877,6 +3121,7 @@ pub unsafe fn _mm512_mask_cmple_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmple_epi16_mask&expand=963) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmple_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { simd_bitmask::(simd_le(a.as_i16x16(), b.as_i16x16())) @@ -2887,6 +3132,7 @@ pub unsafe fn _mm256_cmple_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmple_epi16_mask&expand=964) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmple_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_cmple_epi16_mask(a, b) & k1 @@ -2897,6 +3143,7 @@ pub unsafe fn _mm256_mask_cmple_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_epi16_mask&expand=961) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmple_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_le(a.as_i16x8(), b.as_i16x8())) @@ -2907,6 +3154,7 @@ pub unsafe fn _mm_cmple_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmple_epi16_mask&expand=962) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] 
pub unsafe fn _mm_mask_cmple_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmple_epi16_mask(a, b) & k1 @@ -2917,6 +3165,7 @@ pub unsafe fn _mm_mask_cmple_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_epi8_mask&expand=983) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmple_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { simd_bitmask::(simd_le(a.as_i8x64(), b.as_i8x64())) @@ -2927,6 +3176,7 @@ pub unsafe fn _mm512_cmple_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_epi8_mask&expand=984) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmple_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_cmple_epi8_mask(a, b) & k1 @@ -2937,6 +3187,7 @@ pub unsafe fn _mm512_mask_cmple_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmple_epi8_mask&expand=981) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmple_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { simd_bitmask::(simd_le(a.as_i8x32(), b.as_i8x32())) @@ -2947,6 +3198,7 @@ pub unsafe fn _mm256_cmple_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmple_epi8_mask&expand=982) #[inline] #[target_feature(enable = 
"avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmple_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_cmple_epi8_mask(a, b) & k1 @@ -2957,6 +3209,7 @@ pub unsafe fn _mm256_mask_cmple_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_epi8_mask&expand=979) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmple_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { simd_bitmask::(simd_le(a.as_i8x16(), b.as_i8x16())) @@ -2967,6 +3220,7 @@ pub unsafe fn _mm_cmple_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmple_epi8_mask&expand=980) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmple_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_cmple_epi8_mask(a, b) & k1 @@ -2977,6 +3231,7 @@ pub unsafe fn _mm_mask_cmple_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpge_epu16_mask&expand=867) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmpge_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { simd_bitmask::(simd_ge(a.as_u16x32(), b.as_u16x32())) @@ -2987,6 +3242,7 @@ pub unsafe fn _mm512_cmpge_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpge_epu16_mask&expand=868) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmpge_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_cmpge_epu16_mask(a, b) & k1 @@ -2997,6 +3253,7 @@ pub unsafe fn _mm512_mask_cmpge_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpge_epu16_mask&expand=865) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmpge_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { simd_bitmask::(simd_ge(a.as_u16x16(), b.as_u16x16())) @@ -3007,6 +3264,7 @@ pub unsafe fn _mm256_cmpge_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpge_epu16_mask&expand=866) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmpge_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_cmpge_epu16_mask(a, b) & k1 @@ -3017,6 +3275,7 @@ pub unsafe fn _mm256_mask_cmpge_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_epu16_mask&expand=863) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmpge_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { 
simd_bitmask::(simd_ge(a.as_u16x8(), b.as_u16x8())) @@ -3027,6 +3286,7 @@ pub unsafe fn _mm_cmpge_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpge_epu16_mask&expand=864) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmpge_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpge_epu16_mask(a, b) & k1 @@ -3037,6 +3297,7 @@ pub unsafe fn _mm_mask_cmpge_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpge_epu8_mask&expand=885) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmpge_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { simd_bitmask::(simd_ge(a.as_u8x64(), b.as_u8x64())) @@ -3047,6 +3308,7 @@ pub unsafe fn _mm512_cmpge_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpge_epu8_mask&expand=886) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmpge_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_cmpge_epu8_mask(a, b) & k1 @@ -3057,6 +3319,7 @@ pub unsafe fn _mm512_mask_cmpge_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpge_epu8_mask&expand=883) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmpge_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { simd_bitmask::(simd_ge(a.as_u8x32(), b.as_u8x32())) @@ -3067,6 +3330,7 @@ pub unsafe fn _mm256_cmpge_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpge_epu8_mask&expand=884) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmpge_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_cmpge_epu8_mask(a, b) & k1 @@ -3077,6 +3341,7 @@ pub unsafe fn _mm256_mask_cmpge_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_epu8_mask&expand=881) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmpge_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { simd_bitmask::(simd_ge(a.as_u8x16(), b.as_u8x16())) @@ -3087,6 +3352,7 @@ pub unsafe fn _mm_cmpge_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpge_epu8_mask&expand=882) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmpge_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_cmpge_epu8_mask(a, b) & k1 @@ -3097,6 +3363,7 @@ pub unsafe fn _mm_mask_cmpge_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpge_epi16_mask&expand=843) 
#[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmpge_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { simd_bitmask::(simd_ge(a.as_i16x32(), b.as_i16x32())) @@ -3107,6 +3374,7 @@ pub unsafe fn _mm512_cmpge_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpge_epi16_mask&expand=844) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmpge_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_cmpge_epi16_mask(a, b) & k1 @@ -3117,6 +3385,7 @@ pub unsafe fn _mm512_mask_cmpge_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpge_epi16_mask&expand=841) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmpge_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { simd_bitmask::(simd_ge(a.as_i16x16(), b.as_i16x16())) @@ -3127,6 +3396,7 @@ pub unsafe fn _mm256_cmpge_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpge_epi16_mask&expand=842) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmpge_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_cmpge_epi16_mask(a, b) & k1 @@ -3137,6 +3407,7 @@ pub unsafe fn _mm256_mask_cmpge_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_epi16_mask&expand=839) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmpge_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_ge(a.as_i16x8(), b.as_i16x8())) @@ -3147,6 +3418,7 @@ pub unsafe fn _mm_cmpge_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpge_epi16_mask&expand=840) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmpge_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpge_epi16_mask(a, b) & k1 @@ -3157,6 +3429,7 @@ pub unsafe fn _mm_mask_cmpge_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpge_epi8_mask&expand=861) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmpge_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { simd_bitmask::(simd_ge(a.as_i8x64(), b.as_i8x64())) @@ -3167,6 +3440,7 @@ pub unsafe fn _mm512_cmpge_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpge_epi8_mask&expand=862) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmpge_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_cmpge_epi8_mask(a, b) & k1 @@ 
-3177,6 +3451,7 @@ pub unsafe fn _mm512_mask_cmpge_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpge_epi8_mask&expand=859) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmpge_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { simd_bitmask::(simd_ge(a.as_i8x32(), b.as_i8x32())) @@ -3187,6 +3462,7 @@ pub unsafe fn _mm256_cmpge_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpge_epi8_mask&expand=860) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmpge_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_cmpge_epi8_mask(a, b) & k1 @@ -3197,6 +3473,7 @@ pub unsafe fn _mm256_mask_cmpge_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_epi8_mask&expand=857) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmpge_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { simd_bitmask::(simd_ge(a.as_i8x16(), b.as_i8x16())) @@ -3207,6 +3484,7 @@ pub unsafe fn _mm_cmpge_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpge_epi8_mask&expand=858) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe 
fn _mm_mask_cmpge_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_cmpge_epi8_mask(a, b) & k1 @@ -3217,6 +3495,7 @@ pub unsafe fn _mm_mask_cmpge_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_epu16_mask&expand=801) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmpeq_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { simd_bitmask::(simd_eq(a.as_u16x32(), b.as_u16x32())) @@ -3227,6 +3506,7 @@ pub unsafe fn _mm512_cmpeq_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_epu16_mask&expand=802) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmpeq_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_cmpeq_epu16_mask(a, b) & k1 @@ -3237,6 +3517,7 @@ pub unsafe fn _mm512_mask_cmpeq_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epu16_mask&expand=799) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmpeq_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { simd_bitmask::(simd_eq(a.as_u16x16(), b.as_u16x16())) @@ -3247,6 +3528,7 @@ pub unsafe fn _mm256_cmpeq_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpeq_epu16_mask&expand=800) #[inline] #[target_feature(enable = 
"avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmpeq_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_cmpeq_epu16_mask(a, b) & k1 @@ -3257,6 +3539,7 @@ pub unsafe fn _mm256_mask_cmpeq_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epu16_mask&expand=797) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmpeq_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_eq(a.as_u16x8(), b.as_u16x8())) @@ -3267,6 +3550,7 @@ pub unsafe fn _mm_cmpeq_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpeq_epu16_mask&expand=798) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmpeq_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpeq_epu16_mask(a, b) & k1 @@ -3277,6 +3561,7 @@ pub unsafe fn _mm_mask_cmpeq_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_epu8_mask&expand=819) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmpeq_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { simd_bitmask::(simd_eq(a.as_u8x64(), b.as_u8x64())) @@ -3287,6 +3572,7 @@ pub unsafe fn _mm512_cmpeq_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_epu8_mask&expand=820) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmpeq_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_cmpeq_epu8_mask(a, b) & k1 @@ -3297,6 +3583,7 @@ pub unsafe fn _mm512_mask_cmpeq_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epu8_mask&expand=817) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmpeq_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { simd_bitmask::(simd_eq(a.as_u8x32(), b.as_u8x32())) @@ -3307,6 +3594,7 @@ pub unsafe fn _mm256_cmpeq_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpeq_epu8_mask&expand=818) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmpeq_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_cmpeq_epu8_mask(a, b) & k1 @@ -3317,6 +3605,7 @@ pub unsafe fn _mm256_mask_cmpeq_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epu8_mask&expand=815) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmpeq_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { simd_bitmask::(simd_eq(a.as_u8x16(), 
b.as_u8x16())) @@ -3327,6 +3616,7 @@ pub unsafe fn _mm_cmpeq_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpeq_epu8_mask&expand=816) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmpeq_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_cmpeq_epu8_mask(a, b) & k1 @@ -3337,6 +3627,7 @@ pub unsafe fn _mm_mask_cmpeq_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_epi16_mask&expand=771) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmpeq_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { simd_bitmask::(simd_eq(a.as_i16x32(), b.as_i16x32())) @@ -3347,6 +3638,7 @@ pub unsafe fn _mm512_cmpeq_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_epi16_mask&expand=772) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmpeq_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_cmpeq_epi16_mask(a, b) & k1 @@ -3357,6 +3649,7 @@ pub unsafe fn _mm512_mask_cmpeq_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epi16_mask&expand=769) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] 
pub unsafe fn _mm256_cmpeq_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { simd_bitmask::(simd_eq(a.as_i16x16(), b.as_i16x16())) @@ -3367,6 +3660,7 @@ pub unsafe fn _mm256_cmpeq_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpeq_epi16_mask&expand=770) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmpeq_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_cmpeq_epi16_mask(a, b) & k1 @@ -3377,6 +3671,7 @@ pub unsafe fn _mm256_mask_cmpeq_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi16_mask&expand=767) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmpeq_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_eq(a.as_i16x8(), b.as_i16x8())) @@ -3387,6 +3682,7 @@ pub unsafe fn _mm_cmpeq_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpeq_epi16_mask&expand=768) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmpeq_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpeq_epi16_mask(a, b) & k1 @@ -3397,6 +3693,7 @@ pub unsafe fn _mm_mask_cmpeq_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_epi8_mask&expand=795) #[inline] #[target_feature(enable = 
"avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmpeq_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { simd_bitmask::(simd_eq(a.as_i8x64(), b.as_i8x64())) @@ -3407,6 +3704,7 @@ pub unsafe fn _mm512_cmpeq_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_epi8_mask&expand=796) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmpeq_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_cmpeq_epi8_mask(a, b) & k1 @@ -3417,6 +3715,7 @@ pub unsafe fn _mm512_mask_cmpeq_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epi8_mask&expand=793) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmpeq_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { simd_bitmask::(simd_eq(a.as_i8x32(), b.as_i8x32())) @@ -3427,6 +3726,7 @@ pub unsafe fn _mm256_cmpeq_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpeq_epi8_mask&expand=794) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmpeq_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_cmpeq_epi8_mask(a, b) & k1 @@ -3437,6 +3737,7 @@ pub unsafe fn _mm256_mask_cmpeq_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi8_mask&expand=791) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmpeq_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { simd_bitmask::(simd_eq(a.as_i8x16(), b.as_i8x16())) @@ -3447,6 +3748,7 @@ pub unsafe fn _mm_cmpeq_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpeq_epi8_mask&expand=792) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmpeq_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_cmpeq_epi8_mask(a, b) & k1 @@ -3457,6 +3759,7 @@ pub unsafe fn _mm_mask_cmpeq_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_epu16_mask&expand=1106) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmpneq_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { simd_bitmask::(simd_ne(a.as_u16x32(), b.as_u16x32())) @@ -3467,6 +3770,7 @@ pub unsafe fn _mm512_cmpneq_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_epu16_mask&expand=1107) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmpneq_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_cmpneq_epu16_mask(a, b) & k1 @@ 
-3477,6 +3781,7 @@ pub unsafe fn _mm512_mask_cmpneq_epu16_mask(k1: __mmask32, a: __m512i, b: __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpneq_epu16_mask&expand=1104) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmpneq_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { simd_bitmask::(simd_ne(a.as_u16x16(), b.as_u16x16())) @@ -3487,6 +3792,7 @@ pub unsafe fn _mm256_cmpneq_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpneq_epu16_mask&expand=1105) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmpneq_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_cmpneq_epu16_mask(a, b) & k1 @@ -3497,6 +3803,7 @@ pub unsafe fn _mm256_mask_cmpneq_epu16_mask(k1: __mmask16, a: __m256i, b: __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_epu16_mask&expand=1102) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmpneq_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_ne(a.as_u16x8(), b.as_u16x8())) @@ -3507,6 +3814,7 @@ pub unsafe fn _mm_cmpneq_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpneq_epu16_mask&expand=1103) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmpneq_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpneq_epu16_mask(a, b) & k1 @@ -3517,6 +3825,7 @@ pub unsafe fn _mm_mask_cmpneq_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_epu8_mask&expand=1124) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmpneq_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { simd_bitmask::(simd_ne(a.as_u8x64(), b.as_u8x64())) @@ -3527,6 +3836,7 @@ pub unsafe fn _mm512_cmpneq_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_epu8_mask&expand=1125) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmpneq_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_cmpneq_epu8_mask(a, b) & k1 @@ -3537,6 +3847,7 @@ pub unsafe fn _mm512_mask_cmpneq_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpneq_epu8_mask&expand=1122) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmpneq_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { simd_bitmask::(simd_ne(a.as_u8x32(), b.as_u8x32())) @@ -3547,6 +3858,7 @@ pub unsafe fn _mm256_cmpneq_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpneq_epu8_mask&expand=1123) #[inline] 
#[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmpneq_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_cmpneq_epu8_mask(a, b) & k1 @@ -3557,6 +3869,7 @@ pub unsafe fn _mm256_mask_cmpneq_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_epu8_mask&expand=1120) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmpneq_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { simd_bitmask::(simd_ne(a.as_u8x16(), b.as_u8x16())) @@ -3567,6 +3880,7 @@ pub unsafe fn _mm_cmpneq_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpneq_epu8_mask&expand=1121) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmpneq_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_cmpneq_epu8_mask(a, b) & k1 @@ -3577,6 +3891,7 @@ pub unsafe fn _mm_mask_cmpneq_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_epi16_mask&expand=1082) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmpneq_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { simd_bitmask::(simd_ne(a.as_i16x32(), b.as_i16x32())) @@ -3587,6 +3902,7 @@ pub unsafe fn _mm512_cmpneq_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_epi16_mask&expand=1083) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmpneq_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { _mm512_cmpneq_epi16_mask(a, b) & k1 @@ -3597,6 +3913,7 @@ pub unsafe fn _mm512_mask_cmpneq_epi16_mask(k1: __mmask32, a: __m512i, b: __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpneq_epi16_mask&expand=1080) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmpneq_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { simd_bitmask::(simd_ne(a.as_i16x16(), b.as_i16x16())) @@ -3607,6 +3924,7 @@ pub unsafe fn _mm256_cmpneq_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpneq_epi16_mask&expand=1081) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmpneq_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { _mm256_cmpneq_epi16_mask(a, b) & k1 @@ -3617,6 +3935,7 @@ pub unsafe fn _mm256_mask_cmpneq_epi16_mask(k1: __mmask16, a: __m256i, b: __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_epi16_mask&expand=1078) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmpneq_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { 
simd_bitmask::(simd_ne(a.as_i16x8(), b.as_i16x8())) @@ -3627,6 +3946,7 @@ pub unsafe fn _mm_cmpneq_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpneq_epi16_mask&expand=1079) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmpneq_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpneq_epi16_mask(a, b) & k1 @@ -3637,6 +3957,7 @@ pub unsafe fn _mm_mask_cmpneq_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_epi8_mask&expand=1100) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_cmpneq_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { simd_bitmask::(simd_ne(a.as_i8x64(), b.as_i8x64())) @@ -3647,6 +3968,7 @@ pub unsafe fn _mm512_cmpneq_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_epi8_mask&expand=1101) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm512_mask_cmpneq_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { _mm512_cmpneq_epi8_mask(a, b) & k1 @@ -3657,6 +3979,7 @@ pub unsafe fn _mm512_mask_cmpneq_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpneq_epi8_mask&expand=1098) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = 
"111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_cmpneq_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { simd_bitmask::(simd_ne(a.as_i8x32(), b.as_i8x32())) @@ -3667,6 +3990,7 @@ pub unsafe fn _mm256_cmpneq_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpneq_epi8_mask&expand=1099) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm256_mask_cmpneq_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { _mm256_cmpneq_epi8_mask(a, b) & k1 @@ -3677,6 +4001,7 @@ pub unsafe fn _mm256_mask_cmpneq_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_epi8_mask&expand=1096) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_cmpneq_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { simd_bitmask::(simd_ne(a.as_i8x16(), b.as_i8x16())) @@ -3687,6 +4012,7 @@ pub unsafe fn _mm_cmpneq_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpneq_epi8_mask&expand=1097) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] pub unsafe fn _mm_mask_cmpneq_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { _mm_cmpneq_epi8_mask(a, b) & k1 @@ -3697,6 +4023,7 @@ pub unsafe fn _mm_mask_cmpneq_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) - /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_epu16_mask&expand=715) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm512_cmp_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 { @@ -3711,6 +4038,7 @@ pub unsafe fn _mm512_cmp_epu16_mask(a: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_epu16_mask&expand=716) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm512_mask_cmp_epu16_mask( @@ -3729,6 +4057,7 @@ pub unsafe fn _mm512_mask_cmp_epu16_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_epu16_mask&expand=713) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm256_cmp_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 { @@ -3743,6 +4072,7 @@ pub unsafe fn _mm256_cmp_epu16_mask(a: __m256i, b: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_epu16_mask&expand=714) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm256_mask_cmp_epu16_mask( @@ -3761,6 +4091,7 @@ pub unsafe fn _mm256_mask_cmp_epu16_mask( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_epu16_mask&expand=711) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm_cmp_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 { @@ -3775,6 +4106,7 @@ pub unsafe fn _mm_cmp_epu16_mask(a: __m128i, b: __m128i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_epu16_mask&expand=712) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm_mask_cmp_epu16_mask( @@ -3793,6 +4125,7 @@ pub unsafe fn _mm_mask_cmp_epu16_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_epu8_mask&expand=733) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm512_cmp_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 { @@ -3812,6 +4145,7 @@ pub unsafe fn _mm512_cmp_epu8_mask(a: __m512i, b: __m512i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_epu8_mask&expand=734) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm512_mask_cmp_epu8_mask( @@ -3830,6 +4164,7 @@ pub unsafe fn _mm512_mask_cmp_epu8_mask( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_epu8_mask&expand=731) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm256_cmp_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 { @@ -3844,6 +4179,7 @@ pub unsafe fn _mm256_cmp_epu8_mask(a: __m256i, b: __m256i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_epu8_mask&expand=732) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm256_mask_cmp_epu8_mask( @@ -3862,6 +4198,7 @@ pub unsafe fn _mm256_mask_cmp_epu8_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_epu8_mask&expand=729) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm_cmp_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 { @@ -3876,6 +4213,7 @@ pub unsafe fn _mm_cmp_epu8_mask(a: __m128i, b: __m128i) -> __mm /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_epu8_mask&expand=730) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm_mask_cmp_epu8_mask( @@ -3894,6 +4232,7 @@ pub unsafe fn _mm_mask_cmp_epu8_mask( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_epi16_mask&expand=691) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm512_cmp_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { @@ -3908,6 +4247,7 @@ pub unsafe fn _mm512_cmp_epi16_mask(a: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_epi16_mask&expand=692) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm512_mask_cmp_epi16_mask( @@ -3926,6 +4266,7 @@ pub unsafe fn _mm512_mask_cmp_epi16_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_epi16_mask&expand=689) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm256_cmp_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { @@ -3940,6 +4281,7 @@ pub unsafe fn _mm256_cmp_epi16_mask(a: __m256i, b: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_epi16_mask&expand=690) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm256_mask_cmp_epi16_mask( @@ -3958,6 +4300,7 @@ pub unsafe fn _mm256_mask_cmp_epi16_mask( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_epi16_mask&expand=687) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm_cmp_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { @@ -3972,6 +4315,7 @@ pub unsafe fn _mm_cmp_epi16_mask(a: __m128i, b: __m128i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_epi16_mask&expand=688) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm_mask_cmp_epi16_mask( @@ -3990,6 +4334,7 @@ pub unsafe fn _mm_mask_cmp_epi16_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_epi8_mask&expand=709) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm512_cmp_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { @@ -4009,6 +4354,7 @@ pub unsafe fn _mm512_cmp_epi8_mask(a: __m512i, b: __m512i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_epi8_mask&expand=710) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm512_mask_cmp_epi8_mask( @@ -4027,6 +4373,7 @@ pub unsafe fn _mm512_mask_cmp_epi8_mask( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_epi8_mask&expand=707) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm256_cmp_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { @@ -4041,6 +4388,7 @@ pub unsafe fn _mm256_cmp_epi8_mask(a: __m256i, b: __m256i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_epi8_mask&expand=708) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm256_mask_cmp_epi8_mask( @@ -4059,6 +4407,7 @@ pub unsafe fn _mm256_mask_cmp_epi8_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_epi8_mask&expand=705) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm_cmp_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { @@ -4073,6 +4422,7 @@ pub unsafe fn _mm_cmp_epi8_mask(a: __m128i, b: __m128i) -> __mm /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_epi8_mask&expand=706) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))] pub unsafe fn _mm_mask_cmp_epi8_mask( @@ -4091,6 +4441,7 @@ pub unsafe fn _mm_mask_cmp_epi8_mask( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_loadu_epi16&expand=3368) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16 pub unsafe fn _mm512_loadu_epi16(mem_addr: *const i16) -> __m512i { ptr::read_unaligned(mem_addr as *const __m512i) @@ -4101,6 +4452,7 @@ pub unsafe fn _mm512_loadu_epi16(mem_addr: *const i16) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_loadu_epi16&expand=3365) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16 pub unsafe fn _mm256_loadu_epi16(mem_addr: *const i16) -> __m256i { ptr::read_unaligned(mem_addr as *const __m256i) @@ -4111,6 +4463,7 @@ pub unsafe fn _mm256_loadu_epi16(mem_addr: *const i16) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_epi16&expand=3362) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16 pub unsafe fn _mm_loadu_epi16(mem_addr: *const i16) -> __m128i { ptr::read_unaligned(mem_addr as *const __m128i) @@ -4121,6 +4474,7 @@ pub unsafe fn _mm_loadu_epi16(mem_addr: *const i16) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_loadu_epi8&expand=3395) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8 pub unsafe fn _mm512_loadu_epi8(mem_addr: *const i8) -> __m512i { ptr::read_unaligned(mem_addr as *const __m512i) @@ -4131,6 +4485,7 @@ 
pub unsafe fn _mm512_loadu_epi8(mem_addr: *const i8) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_loadu_epi8&expand=3392) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8 pub unsafe fn _mm256_loadu_epi8(mem_addr: *const i8) -> __m256i { ptr::read_unaligned(mem_addr as *const __m256i) @@ -4141,6 +4496,7 @@ pub unsafe fn _mm256_loadu_epi8(mem_addr: *const i8) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_epi8&expand=3389) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8 pub unsafe fn _mm_loadu_epi8(mem_addr: *const i8) -> __m128i { ptr::read_unaligned(mem_addr as *const __m128i) @@ -4151,6 +4507,7 @@ pub unsafe fn _mm_loadu_epi8(mem_addr: *const i8) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_storeu_epi16&expand=5622) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16 pub unsafe fn _mm512_storeu_epi16(mem_addr: *mut i16, a: __m512i) { ptr::write_unaligned(mem_addr as *mut __m512i, a); @@ -4161,6 +4518,7 @@ pub unsafe fn _mm512_storeu_epi16(mem_addr: *mut i16, a: __m512i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_storeu_epi16&expand=5620) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16 pub unsafe fn _mm256_storeu_epi16(mem_addr: *mut 
i16, a: __m256i) { ptr::write_unaligned(mem_addr as *mut __m256i, a); @@ -4171,6 +4529,7 @@ pub unsafe fn _mm256_storeu_epi16(mem_addr: *mut i16, a: __m256i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_epi16&expand=5618) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16 pub unsafe fn _mm_storeu_epi16(mem_addr: *mut i16, a: __m128i) { ptr::write_unaligned(mem_addr as *mut __m128i, a); @@ -4181,6 +4540,7 @@ pub unsafe fn _mm_storeu_epi16(mem_addr: *mut i16, a: __m128i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_storeu_epi8&expand=5640) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8 pub unsafe fn _mm512_storeu_epi8(mem_addr: *mut i8, a: __m512i) { ptr::write_unaligned(mem_addr as *mut __m512i, a); @@ -4191,6 +4551,7 @@ pub unsafe fn _mm512_storeu_epi8(mem_addr: *mut i8, a: __m512i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_storeu_epi8&expand=5638) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8 pub unsafe fn _mm256_storeu_epi8(mem_addr: *mut i8, a: __m256i) { ptr::write_unaligned(mem_addr as *mut __m256i, a); @@ -4201,6 +4562,7 @@ pub unsafe fn _mm256_storeu_epi8(mem_addr: *mut i8, a: __m256i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_epi8&expand=5636) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vmovups))] //should be vmovdqu8 pub unsafe fn _mm_storeu_epi8(mem_addr: *mut i8, a: __m128i) { ptr::write_unaligned(mem_addr as *mut __m128i, a); @@ -4213,6 +4575,7 @@ pub unsafe fn _mm_storeu_epi8(mem_addr: *mut i8, a: __m128i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_loadu_epi16) #[inline] #[target_feature(enable = "avx512f,avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_loadu_epi16(src: __m512i, k: __mmask32, mem_addr: *const i16) -> __m512i { let mut dst: __m512i = src; asm!( @@ -4232,6 +4595,7 @@ pub unsafe fn _mm512_mask_loadu_epi16(src: __m512i, k: __mmask32, mem_addr: *con /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_loadu_epi16) #[inline] #[target_feature(enable = "avx512f,avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_loadu_epi16(k: __mmask32, mem_addr: *const i16) -> __m512i { let mut dst: __m512i; asm!( @@ -4251,6 +4615,7 @@ pub unsafe fn _mm512_maskz_loadu_epi16(k: __mmask32, mem_addr: *const i16) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_loadu_epi8) #[inline] #[target_feature(enable = "avx512f,avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_loadu_epi8(src: __m512i, k: __mmask64, mem_addr: *const i8) -> __m512i { let mut dst: __m512i = src; asm!( @@ -4270,6 +4635,7 @@ pub unsafe fn _mm512_mask_loadu_epi8(src: __m512i, k: __mmask64, mem_addr: *cons /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_loadu_epi8) #[inline] #[target_feature(enable = "avx512f,avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_loadu_epi8(k: __mmask64, 
mem_addr: *const i8) -> __m512i { let mut dst: __m512i; asm!( @@ -4289,6 +4655,7 @@ pub unsafe fn _mm512_maskz_loadu_epi8(k: __mmask64, mem_addr: *const i8) -> __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_loadu_epi16) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_loadu_epi16(src: __m256i, k: __mmask16, mem_addr: *const i16) -> __m256i { let mut dst: __m256i = src; asm!( @@ -4308,6 +4675,7 @@ pub unsafe fn _mm256_mask_loadu_epi16(src: __m256i, k: __mmask16, mem_addr: *con /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_loadu_epi16) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_loadu_epi16(k: __mmask16, mem_addr: *const i16) -> __m256i { let mut dst: __m256i; asm!( @@ -4327,6 +4695,7 @@ pub unsafe fn _mm256_maskz_loadu_epi16(k: __mmask16, mem_addr: *const i16) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_loadu_epi8) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_loadu_epi8(src: __m256i, k: __mmask32, mem_addr: *const i8) -> __m256i { let mut dst: __m256i = src; asm!( @@ -4346,6 +4715,7 @@ pub unsafe fn _mm256_mask_loadu_epi8(src: __m256i, k: __mmask32, mem_addr: *cons /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_loadu_epi8) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_loadu_epi8(k: __mmask32, mem_addr: *const i8) -> __m256i { 
let mut dst: __m256i; asm!( @@ -4365,6 +4735,7 @@ pub unsafe fn _mm256_maskz_loadu_epi8(k: __mmask32, mem_addr: *const i8) -> __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_loadu_epi16) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_loadu_epi16(src: __m128i, k: __mmask8, mem_addr: *const i16) -> __m128i { let mut dst: __m128i = src; asm!( @@ -4384,6 +4755,7 @@ pub unsafe fn _mm_mask_loadu_epi16(src: __m128i, k: __mmask8, mem_addr: *const i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_loadu_epi16) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_maskz_loadu_epi16(k: __mmask8, mem_addr: *const i16) -> __m128i { let mut dst: __m128i; asm!( @@ -4403,6 +4775,7 @@ pub unsafe fn _mm_maskz_loadu_epi16(k: __mmask8, mem_addr: *const i16) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_loadu_epi8) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_loadu_epi8(src: __m128i, k: __mmask16, mem_addr: *const i8) -> __m128i { let mut dst: __m128i = src; asm!( @@ -4422,6 +4795,7 @@ pub unsafe fn _mm_mask_loadu_epi8(src: __m128i, k: __mmask16, mem_addr: *const i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_loadu_epi8) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_maskz_loadu_epi8(k: __mmask16, mem_addr: *const i8) -> __m128i { let mut dst: __m128i; asm!( @@ -4440,6 
+4814,7 @@ pub unsafe fn _mm_maskz_loadu_epi8(k: __mmask16, mem_addr: *const i8) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_storeu_epi16) #[inline] #[target_feature(enable = "avx512f,avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask32, a: __m512i) { asm!( vps!("vmovdqu16", "{{{mask}}}, {a}"), @@ -4456,6 +4831,7 @@ pub unsafe fn _mm512_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask32, a: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_storeu_epi8) #[inline] #[target_feature(enable = "avx512f,avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask64, a: __m512i) { asm!( vps!("vmovdqu8", "{{{mask}}}, {a}"), @@ -4472,6 +4848,7 @@ pub unsafe fn _mm512_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask64, a: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_storeu_epi16) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask16, a: __m256i) { asm!( vps!("vmovdqu16", "{{{mask}}}, {a}"), @@ -4488,6 +4865,7 @@ pub unsafe fn _mm256_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask16, a: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_storeu_epi8) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask32, a: __m256i) { asm!( vps!("vmovdqu8", "{{{mask}}}, {a}"), @@ -4504,6 +4882,7 @@ pub unsafe fn 
_mm256_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask32, a: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_storeu_epi16) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask8, a: __m128i) { asm!( vps!("vmovdqu16", "{{{mask}}}, {a}"), @@ -4520,6 +4899,7 @@ pub unsafe fn _mm_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask8, a: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_storeu_epi8) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask16, a: __m128i) { asm!( vps!("vmovdqu8", "{{{mask}}}, {a}"), @@ -4535,6 +4915,7 @@ pub unsafe fn _mm_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask16, a: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_madd_epi16&expand=3511) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaddwd))] pub unsafe fn _mm512_madd_epi16(a: __m512i, b: __m512i) -> __m512i { transmute(vpmaddwd(a.as_i16x32(), b.as_i16x32())) @@ -4545,6 +4926,7 @@ pub unsafe fn _mm512_madd_epi16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_madd_epi16&expand=3512) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaddwd))] pub unsafe fn _mm512_mask_madd_epi16( src: __m512i, @@ -4561,6 +4943,7 @@ pub unsafe fn _mm512_mask_madd_epi16( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_madd_epi16&expand=3513) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaddwd))] pub unsafe fn _mm512_maskz_madd_epi16(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let madd = _mm512_madd_epi16(a, b).as_i32x16(); @@ -4573,6 +4956,7 @@ pub unsafe fn _mm512_maskz_madd_epi16(k: __mmask16, a: __m512i, b: __m512i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_madd_epi16&expand=3509) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaddwd))] pub unsafe fn _mm256_mask_madd_epi16(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let madd = _mm256_madd_epi16(a, b).as_i32x8(); @@ -4584,6 +4968,7 @@ pub unsafe fn _mm256_mask_madd_epi16(src: __m256i, k: __mmask8, a: __m256i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_madd_epi16&expand=3510) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaddwd))] pub unsafe fn _mm256_maskz_madd_epi16(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let madd = _mm256_madd_epi16(a, b).as_i32x8(); @@ -4596,6 +4981,7 @@ pub unsafe fn _mm256_maskz_madd_epi16(k: __mmask8, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_madd_epi16&expand=3506) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaddwd))] pub unsafe fn _mm_mask_madd_epi16(src: __m128i, k: __mmask8, a: __m128i, 
b: __m128i) -> __m128i { let madd = _mm_madd_epi16(a, b).as_i32x4(); @@ -4607,6 +4993,7 @@ pub unsafe fn _mm_mask_madd_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_madd_epi16&expand=3507) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaddwd))] pub unsafe fn _mm_maskz_madd_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let madd = _mm_madd_epi16(a, b).as_i32x4(); @@ -4619,6 +5006,7 @@ pub unsafe fn _mm_maskz_madd_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maddubs_epi16&expand=3539) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaddubsw))] pub unsafe fn _mm512_maddubs_epi16(a: __m512i, b: __m512i) -> __m512i { transmute(vpmaddubsw(a.as_i8x64(), b.as_i8x64())) @@ -4629,6 +5017,7 @@ pub unsafe fn _mm512_maddubs_epi16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_maddubs_epi16&expand=3540) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaddubsw))] pub unsafe fn _mm512_mask_maddubs_epi16( src: __m512i, @@ -4645,6 +5034,7 @@ pub unsafe fn _mm512_mask_maddubs_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_maddubs_epi16&expand=3541) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaddubsw))] pub unsafe fn _mm512_maskz_maddubs_epi16(k: __mmask32, a: 
__m512i, b: __m512i) -> __m512i { let madd = _mm512_maddubs_epi16(a, b).as_i16x32(); @@ -4657,6 +5047,7 @@ pub unsafe fn _mm512_maskz_maddubs_epi16(k: __mmask32, a: __m512i, b: __m512i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_maddubs_epi16&expand=3537) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaddubsw))] pub unsafe fn _mm256_mask_maddubs_epi16( src: __m256i, @@ -4673,6 +5064,7 @@ pub unsafe fn _mm256_mask_maddubs_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_maddubs_epi16&expand=3538) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaddubsw))] pub unsafe fn _mm256_maskz_maddubs_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let madd = _mm256_maddubs_epi16(a, b).as_i16x16(); @@ -4685,6 +5077,7 @@ pub unsafe fn _mm256_maskz_maddubs_epi16(k: __mmask16, a: __m256i, b: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_maddubs_epi16&expand=3534) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaddubsw))] pub unsafe fn _mm_mask_maddubs_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let madd = _mm_maddubs_epi16(a, b).as_i16x8(); @@ -4696,6 +5089,7 @@ pub unsafe fn _mm_mask_maddubs_epi16(src: __m128i, k: __mmask8, a: __m128i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_maddubs_epi16&expand=3535) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vpmaddubsw))] pub unsafe fn _mm_maskz_maddubs_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let madd = _mm_maddubs_epi16(a, b).as_i16x8(); @@ -4708,6 +5102,7 @@ pub unsafe fn _mm_maskz_maddubs_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_packs_epi32&expand=4091) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackssdw))] pub unsafe fn _mm512_packs_epi32(a: __m512i, b: __m512i) -> __m512i { transmute(vpackssdw(a.as_i32x16(), b.as_i32x16())) @@ -4718,6 +5113,7 @@ pub unsafe fn _mm512_packs_epi32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_packs_epi32&expand=4089) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackssdw))] pub unsafe fn _mm512_mask_packs_epi32( src: __m512i, @@ -4734,6 +5130,7 @@ pub unsafe fn _mm512_mask_packs_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_packs_epi32&expand=4090) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackssdw))] pub unsafe fn _mm512_maskz_packs_epi32(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let pack = _mm512_packs_epi32(a, b).as_i16x32(); @@ -4746,6 +5143,7 @@ pub unsafe fn _mm512_maskz_packs_epi32(k: __mmask32, a: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_packs_epi32&expand=4086) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = 
"111137")] #[cfg_attr(test, assert_instr(vpackssdw))] pub unsafe fn _mm256_mask_packs_epi32( src: __m256i, @@ -4762,6 +5160,7 @@ pub unsafe fn _mm256_mask_packs_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_packs_epi32&expand=4087) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackssdw))] pub unsafe fn _mm256_maskz_packs_epi32(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let pack = _mm256_packs_epi32(a, b).as_i16x16(); @@ -4774,6 +5173,7 @@ pub unsafe fn _mm256_maskz_packs_epi32(k: __mmask16, a: __m256i, b: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_packs_epi32&expand=4083) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackssdw))] pub unsafe fn _mm_mask_packs_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let pack = _mm_packs_epi32(a, b).as_i16x8(); @@ -4785,6 +5185,7 @@ pub unsafe fn _mm_mask_packs_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_packs_epi32&expand=4084) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackssdw))] pub unsafe fn _mm_maskz_packs_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let pack = _mm_packs_epi32(a, b).as_i16x8(); @@ -4797,6 +5198,7 @@ pub unsafe fn _mm_maskz_packs_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_packs_epi16&expand=4082) #[inline] #[target_feature(enable = "avx512bw")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpacksswb))] pub unsafe fn _mm512_packs_epi16(a: __m512i, b: __m512i) -> __m512i { transmute(vpacksswb(a.as_i16x32(), b.as_i16x32())) @@ -4807,6 +5209,7 @@ pub unsafe fn _mm512_packs_epi16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_packs_epi16&expand=4080) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpacksswb))] pub unsafe fn _mm512_mask_packs_epi16( src: __m512i, @@ -4823,6 +5226,7 @@ pub unsafe fn _mm512_mask_packs_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_packs_epi16&expand=4081) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpacksswb))] pub unsafe fn _mm512_maskz_packs_epi16(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let pack = _mm512_packs_epi16(a, b).as_i8x64(); @@ -4835,6 +5239,7 @@ pub unsafe fn _mm512_maskz_packs_epi16(k: __mmask64, a: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_packs_epi16&expand=4077) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpacksswb))] pub unsafe fn _mm256_mask_packs_epi16( src: __m256i, @@ -4851,6 +5256,7 @@ pub unsafe fn _mm256_mask_packs_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_packs_epi16&expand=4078) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpacksswb))] pub unsafe fn _mm256_maskz_packs_epi16(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let pack = _mm256_packs_epi16(a, b).as_i8x32(); @@ -4863,6 +5269,7 @@ pub unsafe fn _mm256_maskz_packs_epi16(k: __mmask32, a: __m256i, b: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_packs_epi16&expand=4074) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpacksswb))] pub unsafe fn _mm_mask_packs_epi16(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let pack = _mm_packs_epi16(a, b).as_i8x16(); @@ -4874,6 +5281,7 @@ pub unsafe fn _mm_mask_packs_epi16(src: __m128i, k: __mmask16, a: __m128i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_packs_epi16&expand=4075) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpacksswb))] pub unsafe fn _mm_maskz_packs_epi16(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let pack = _mm_packs_epi16(a, b).as_i8x16(); @@ -4886,6 +5294,7 @@ pub unsafe fn _mm_maskz_packs_epi16(k: __mmask16, a: __m128i, b: __m128i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_packus_epi32&expand=4130) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackusdw))] pub unsafe fn _mm512_packus_epi32(a: __m512i, b: __m512i) -> __m512i { transmute(vpackusdw(a.as_i32x16(), b.as_i32x16())) @@ -4896,6 +5305,7 @@ pub unsafe fn _mm512_packus_epi32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_packus_epi32&expand=4128) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackusdw))] pub unsafe fn _mm512_mask_packus_epi32( src: __m512i, @@ -4912,6 +5322,7 @@ pub unsafe fn _mm512_mask_packus_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_packus_epi32&expand=4129) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackusdw))] pub unsafe fn _mm512_maskz_packus_epi32(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let pack = _mm512_packus_epi32(a, b).as_i16x32(); @@ -4924,6 +5335,7 @@ pub unsafe fn _mm512_maskz_packus_epi32(k: __mmask32, a: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_packus_epi32&expand=4125) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackusdw))] pub unsafe fn _mm256_mask_packus_epi32( src: __m256i, @@ -4940,6 +5352,7 @@ pub unsafe fn _mm256_mask_packus_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_packus_epi32&expand=4126) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackusdw))] pub unsafe fn _mm256_maskz_packus_epi32(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let pack = _mm256_packus_epi32(a, b).as_i16x16(); @@ -4952,6 +5365,7 @@ pub unsafe fn _mm256_maskz_packus_epi32(k: __mmask16, a: __m256i, b: __m256i) -> /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_packus_epi32&expand=4122) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackusdw))] pub unsafe fn _mm_mask_packus_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let pack = _mm_packus_epi32(a, b).as_i16x8(); @@ -4963,6 +5377,7 @@ pub unsafe fn _mm_mask_packus_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_packus_epi32&expand=4123) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackusdw))] pub unsafe fn _mm_maskz_packus_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let pack = _mm_packus_epi32(a, b).as_i16x8(); @@ -4975,6 +5390,7 @@ pub unsafe fn _mm_maskz_packus_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_packus_epi16&expand=4121) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackuswb))] pub unsafe fn _mm512_packus_epi16(a: __m512i, b: __m512i) -> __m512i { transmute(vpackuswb(a.as_i16x32(), b.as_i16x32())) @@ -4985,6 +5401,7 @@ pub unsafe fn _mm512_packus_epi16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_packus_epi16&expand=4119) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackuswb))] pub unsafe fn _mm512_mask_packus_epi16( src: __m512i, @@ -5001,6 +5418,7 @@ pub unsafe fn 
_mm512_mask_packus_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_packus_epi16&expand=4120) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackuswb))] pub unsafe fn _mm512_maskz_packus_epi16(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let pack = _mm512_packus_epi16(a, b).as_i8x64(); @@ -5013,6 +5431,7 @@ pub unsafe fn _mm512_maskz_packus_epi16(k: __mmask64, a: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_packus_epi16&expand=4116) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackuswb))] pub unsafe fn _mm256_mask_packus_epi16( src: __m256i, @@ -5029,6 +5448,7 @@ pub unsafe fn _mm256_mask_packus_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_packus_epi16&expand=4117) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackuswb))] pub unsafe fn _mm256_maskz_packus_epi16(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let pack = _mm256_packus_epi16(a, b).as_i8x32(); @@ -5041,6 +5461,7 @@ pub unsafe fn _mm256_maskz_packus_epi16(k: __mmask32, a: __m256i, b: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_packus_epi16&expand=4113) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackuswb))] pub unsafe fn _mm_mask_packus_epi16(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let pack = _mm_packus_epi16(a, b).as_i8x16(); @@ 
-5052,6 +5473,7 @@ pub unsafe fn _mm_mask_packus_epi16(src: __m128i, k: __mmask16, a: __m128i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_packus_epi16&expand=4114) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpackuswb))] pub unsafe fn _mm_maskz_packus_epi16(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let pack = _mm_packus_epi16(a, b).as_i8x16(); @@ -5064,6 +5486,7 @@ pub unsafe fn _mm_maskz_packus_epi16(k: __mmask16, a: __m128i, b: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_avg_epu16&expand=388) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpavgw))] pub unsafe fn _mm512_avg_epu16(a: __m512i, b: __m512i) -> __m512i { transmute(vpavgw(a.as_u16x32(), b.as_u16x32())) @@ -5074,6 +5497,7 @@ pub unsafe fn _mm512_avg_epu16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_avg_epu16&expand=389) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpavgw))] pub unsafe fn _mm512_mask_avg_epu16(src: __m512i, k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let avg = _mm512_avg_epu16(a, b).as_u16x32(); @@ -5085,6 +5509,7 @@ pub unsafe fn _mm512_mask_avg_epu16(src: __m512i, k: __mmask32, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_avg_epu16&expand=390) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpavgw))] pub unsafe fn 
_mm512_maskz_avg_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let avg = _mm512_avg_epu16(a, b).as_u16x32(); @@ -5097,6 +5522,7 @@ pub unsafe fn _mm512_maskz_avg_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_avg_epu16&expand=386) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpavgw))] pub unsafe fn _mm256_mask_avg_epu16(src: __m256i, k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let avg = _mm256_avg_epu16(a, b).as_u16x16(); @@ -5108,6 +5534,7 @@ pub unsafe fn _mm256_mask_avg_epu16(src: __m256i, k: __mmask16, a: __m256i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_avg_epu16&expand=387) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpavgw))] pub unsafe fn _mm256_maskz_avg_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let avg = _mm256_avg_epu16(a, b).as_u16x16(); @@ -5120,6 +5547,7 @@ pub unsafe fn _mm256_maskz_avg_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_avg_epu16&expand=383) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpavgw))] pub unsafe fn _mm_mask_avg_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let avg = _mm_avg_epu16(a, b).as_u16x8(); @@ -5131,6 +5559,7 @@ pub unsafe fn _mm_mask_avg_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_avg_epu16&expand=384) 
#[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpavgw))] pub unsafe fn _mm_maskz_avg_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let avg = _mm_avg_epu16(a, b).as_u16x8(); @@ -5143,6 +5572,7 @@ pub unsafe fn _mm_maskz_avg_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_avg_epu8&expand=397) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpavgb))] pub unsafe fn _mm512_avg_epu8(a: __m512i, b: __m512i) -> __m512i { transmute(vpavgb(a.as_u8x64(), b.as_u8x64())) @@ -5153,6 +5583,7 @@ pub unsafe fn _mm512_avg_epu8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_avg_epu8&expand=398) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpavgb))] pub unsafe fn _mm512_mask_avg_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let avg = _mm512_avg_epu8(a, b).as_u8x64(); @@ -5164,6 +5595,7 @@ pub unsafe fn _mm512_mask_avg_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_avg_epu8&expand=399) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpavgb))] pub unsafe fn _mm512_maskz_avg_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let avg = _mm512_avg_epu8(a, b).as_u8x64(); @@ -5176,6 +5608,7 @@ pub unsafe fn _mm512_maskz_avg_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_avg_epu8&expand=395) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpavgb))] pub unsafe fn _mm256_mask_avg_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let avg = _mm256_avg_epu8(a, b).as_u8x32(); @@ -5187,6 +5620,7 @@ pub unsafe fn _mm256_mask_avg_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_avg_epu8&expand=396) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpavgb))] pub unsafe fn _mm256_maskz_avg_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let avg = _mm256_avg_epu8(a, b).as_u8x32(); @@ -5199,6 +5633,7 @@ pub unsafe fn _mm256_maskz_avg_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_avg_epu8&expand=392) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpavgb))] pub unsafe fn _mm_mask_avg_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let avg = _mm_avg_epu8(a, b).as_u8x16(); @@ -5210,6 +5645,7 @@ pub unsafe fn _mm_mask_avg_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_avg_epu8&expand=393) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpavgb))] pub unsafe fn _mm_maskz_avg_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let avg = 
_mm_avg_epu8(a, b).as_u8x16(); @@ -5222,6 +5658,7 @@ pub unsafe fn _mm_maskz_avg_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sll_epi16&expand=5271) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllw))] pub unsafe fn _mm512_sll_epi16(a: __m512i, count: __m128i) -> __m512i { transmute(vpsllw(a.as_i16x32(), count.as_i16x8())) @@ -5232,6 +5669,7 @@ pub unsafe fn _mm512_sll_epi16(a: __m512i, count: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sll_epi16&expand=5269) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllw))] pub unsafe fn _mm512_mask_sll_epi16( src: __m512i, @@ -5248,6 +5686,7 @@ pub unsafe fn _mm512_mask_sll_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sll_epi16&expand=5270) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllw))] pub unsafe fn _mm512_maskz_sll_epi16(k: __mmask32, a: __m512i, count: __m128i) -> __m512i { let shf = _mm512_sll_epi16(a, count).as_i16x32(); @@ -5260,6 +5699,7 @@ pub unsafe fn _mm512_maskz_sll_epi16(k: __mmask32, a: __m512i, count: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sll_epi16&expand=5266) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllw))] pub unsafe fn _mm256_mask_sll_epi16( src: __m256i, @@ -5276,6 +5716,7 @@ pub unsafe fn _mm256_mask_sll_epi16( /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sll_epi16&expand=5267) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllw))] pub unsafe fn _mm256_maskz_sll_epi16(k: __mmask16, a: __m256i, count: __m128i) -> __m256i { let shf = _mm256_sll_epi16(a, count).as_i16x16(); @@ -5288,6 +5729,7 @@ pub unsafe fn _mm256_maskz_sll_epi16(k: __mmask16, a: __m256i, count: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sll_epi16&expand=5263) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllw))] pub unsafe fn _mm_mask_sll_epi16(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_sll_epi16(a, count).as_i16x8(); @@ -5299,6 +5741,7 @@ pub unsafe fn _mm_mask_sll_epi16(src: __m128i, k: __mmask8, a: __m128i, count: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sll_epi16&expand=5264) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllw))] pub unsafe fn _mm_maskz_sll_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_sll_epi16(a, count).as_i16x8(); @@ -5311,6 +5754,7 @@ pub unsafe fn _mm_maskz_sll_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_slli_epi16&expand=5301) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_slli_epi16(a: 
__m512i) -> __m512i { @@ -5327,6 +5771,7 @@ pub unsafe fn _mm512_slli_epi16(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_slli_epi16&expand=5299) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_slli_epi16( @@ -5348,6 +5793,7 @@ pub unsafe fn _mm512_mask_slli_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_slli_epi16&expand=5300) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_slli_epi16(k: __mmask32, a: __m512i) -> __m512i { @@ -5366,6 +5812,7 @@ pub unsafe fn _mm512_maskz_slli_epi16(k: __mmask32, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_slli_epi16&expand=5296) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_slli_epi16( @@ -5387,6 +5834,7 @@ pub unsafe fn _mm256_mask_slli_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_slli_epi16&expand=5297) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_slli_epi16(k: __mmask16, a: __m256i) -> __m256i { @@ -5405,6 +5853,7 @@ pub unsafe fn _mm256_maskz_slli_epi16(k: __mmask16, a: __m256i) /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_slli_epi16&expand=5293) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_slli_epi16( @@ -5426,6 +5875,7 @@ pub unsafe fn _mm_mask_slli_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_slli_epi16&expand=5294) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_slli_epi16(k: __mmask8, a: __m128i) -> __m128i { @@ -5444,6 +5894,7 @@ pub unsafe fn _mm_maskz_slli_epi16(k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sllv_epi16&expand=5333) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvw))] pub unsafe fn _mm512_sllv_epi16(a: __m512i, count: __m512i) -> __m512i { transmute(vpsllvw(a.as_i16x32(), count.as_i16x32())) @@ -5454,6 +5905,7 @@ pub unsafe fn _mm512_sllv_epi16(a: __m512i, count: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sllv_epi16&expand=5331) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvw))] pub unsafe fn _mm512_mask_sllv_epi16( src: __m512i, @@ -5470,6 +5922,7 @@ pub unsafe fn _mm512_mask_sllv_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sllv_epi16&expand=5332) 
#[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvw))] pub unsafe fn _mm512_maskz_sllv_epi16(k: __mmask32, a: __m512i, count: __m512i) -> __m512i { let shf = _mm512_sllv_epi16(a, count).as_i16x32(); @@ -5482,6 +5935,7 @@ pub unsafe fn _mm512_maskz_sllv_epi16(k: __mmask32, a: __m512i, count: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sllv_epi16&expand=5330) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvw))] pub unsafe fn _mm256_sllv_epi16(a: __m256i, count: __m256i) -> __m256i { transmute(vpsllvw256(a.as_i16x16(), count.as_i16x16())) @@ -5492,6 +5946,7 @@ pub unsafe fn _mm256_sllv_epi16(a: __m256i, count: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sllv_epi16&expand=5328) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvw))] pub unsafe fn _mm256_mask_sllv_epi16( src: __m256i, @@ -5508,6 +5963,7 @@ pub unsafe fn _mm256_mask_sllv_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sllv_epi16&expand=5329) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvw))] pub unsafe fn _mm256_maskz_sllv_epi16(k: __mmask16, a: __m256i, count: __m256i) -> __m256i { let shf = _mm256_sllv_epi16(a, count).as_i16x16(); @@ -5520,6 +5976,7 @@ pub unsafe fn _mm256_maskz_sllv_epi16(k: __mmask16, a: __m256i, count: __m256i) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sllv_epi16&expand=5327) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvw))] pub unsafe fn _mm_sllv_epi16(a: __m128i, count: __m128i) -> __m128i { transmute(vpsllvw128(a.as_i16x8(), count.as_i16x8())) @@ -5530,6 +5987,7 @@ pub unsafe fn _mm_sllv_epi16(a: __m128i, count: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sllv_epi16&expand=5325) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvw))] pub unsafe fn _mm_mask_sllv_epi16( src: __m128i, @@ -5546,6 +6004,7 @@ pub unsafe fn _mm_mask_sllv_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sllv_epi16&expand=5326) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvw))] pub unsafe fn _mm_maskz_sllv_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_sllv_epi16(a, count).as_i16x8(); @@ -5558,6 +6017,7 @@ pub unsafe fn _mm_maskz_sllv_epi16(k: __mmask8, a: __m128i, count: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srl_epi16&expand=5483) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlw))] pub unsafe fn _mm512_srl_epi16(a: __m512i, count: __m128i) -> __m512i { transmute(vpsrlw(a.as_i16x32(), count.as_i16x8())) @@ -5568,6 +6028,7 @@ pub unsafe fn _mm512_srl_epi16(a: __m512i, count: __m128i) -> __m512i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srl_epi16&expand=5481) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlw))] pub unsafe fn _mm512_mask_srl_epi16( src: __m512i, @@ -5584,6 +6045,7 @@ pub unsafe fn _mm512_mask_srl_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srl_epi16&expand=5482) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlw))] pub unsafe fn _mm512_maskz_srl_epi16(k: __mmask32, a: __m512i, count: __m128i) -> __m512i { let shf = _mm512_srl_epi16(a, count).as_i16x32(); @@ -5596,6 +6058,7 @@ pub unsafe fn _mm512_maskz_srl_epi16(k: __mmask32, a: __m512i, count: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srl_epi16&expand=5478) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlw))] pub unsafe fn _mm256_mask_srl_epi16( src: __m256i, @@ -5612,6 +6075,7 @@ pub unsafe fn _mm256_mask_srl_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srl_epi16&expand=5479) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlw))] pub unsafe fn _mm256_maskz_srl_epi16(k: __mmask16, a: __m256i, count: __m128i) -> __m256i { let shf = _mm256_srl_epi16(a, count).as_i16x16(); @@ -5624,6 +6088,7 @@ pub unsafe fn _mm256_maskz_srl_epi16(k: __mmask16, a: __m256i, count: __m128i) - /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srl_epi16&expand=5475) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlw))] pub unsafe fn _mm_mask_srl_epi16(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_srl_epi16(a, count).as_i16x8(); @@ -5635,6 +6100,7 @@ pub unsafe fn _mm_mask_srl_epi16(src: __m128i, k: __mmask8, a: __m128i, count: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srl_epi16&expand=5476) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlw))] pub unsafe fn _mm_maskz_srl_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_srl_epi16(a, count).as_i16x8(); @@ -5647,6 +6113,7 @@ pub unsafe fn _mm_maskz_srl_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srli_epi16&expand=5513) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_srli_epi16(a: __m512i) -> __m512i { @@ -5663,6 +6130,7 @@ pub unsafe fn _mm512_srli_epi16(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srli_epi16&expand=5511) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_srli_epi16( @@ -5684,6 +6152,7 @@ pub unsafe fn _mm512_mask_srli_epi16( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srli_epi16&expand=5512) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_srli_epi16(k: __mmask32, a: __m512i) -> __m512i { @@ -5703,6 +6172,7 @@ pub unsafe fn _mm512_maskz_srli_epi16(k: __mmask32, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srli_epi16&expand=5508) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_srli_epi16( @@ -5720,6 +6190,7 @@ pub unsafe fn _mm256_mask_srli_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srli_epi16&expand=5509) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_srli_epi16(k: __mmask16, a: __m256i) -> __m256i { @@ -5734,6 +6205,7 @@ pub unsafe fn _mm256_maskz_srli_epi16(k: __mmask16, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srli_epi16&expand=5505) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_srli_epi16( @@ -5751,6 +6223,7 @@ pub unsafe fn _mm_mask_srli_epi16( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srli_epi16&expand=5506) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_srli_epi16(k: __mmask8, a: __m128i) -> __m128i { @@ -5765,6 +6238,7 @@ pub unsafe fn _mm_maskz_srli_epi16(k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srlv_epi16&expand=5545) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvw))] pub unsafe fn _mm512_srlv_epi16(a: __m512i, count: __m512i) -> __m512i { transmute(vpsrlvw(a.as_i16x32(), count.as_i16x32())) @@ -5775,6 +6249,7 @@ pub unsafe fn _mm512_srlv_epi16(a: __m512i, count: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srlv_epi16&expand=5543) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvw))] pub unsafe fn _mm512_mask_srlv_epi16( src: __m512i, @@ -5791,6 +6266,7 @@ pub unsafe fn _mm512_mask_srlv_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srlv_epi16&expand=5544) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvw))] pub unsafe fn _mm512_maskz_srlv_epi16(k: __mmask32, a: __m512i, count: __m512i) -> __m512i { let shf = _mm512_srlv_epi16(a, count).as_i16x32(); @@ -5803,6 +6279,7 @@ pub unsafe fn _mm512_maskz_srlv_epi16(k: __mmask32, a: __m512i, count: __m512i) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srlv_epi16&expand=5542) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvw))] pub unsafe fn _mm256_srlv_epi16(a: __m256i, count: __m256i) -> __m256i { transmute(vpsrlvw256(a.as_i16x16(), count.as_i16x16())) @@ -5813,6 +6290,7 @@ pub unsafe fn _mm256_srlv_epi16(a: __m256i, count: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srlv_epi16&expand=5540) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvw))] pub unsafe fn _mm256_mask_srlv_epi16( src: __m256i, @@ -5829,6 +6307,7 @@ pub unsafe fn _mm256_mask_srlv_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srlv_epi16&expand=5541) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvw))] pub unsafe fn _mm256_maskz_srlv_epi16(k: __mmask16, a: __m256i, count: __m256i) -> __m256i { let shf = _mm256_srlv_epi16(a, count).as_i16x16(); @@ -5841,6 +6320,7 @@ pub unsafe fn _mm256_maskz_srlv_epi16(k: __mmask16, a: __m256i, count: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srlv_epi16&expand=5539) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvw))] pub unsafe fn _mm_srlv_epi16(a: __m128i, count: __m128i) -> __m128i { transmute(vpsrlvw128(a.as_i16x8(), count.as_i16x8())) @@ -5851,6 +6331,7 @@ pub unsafe fn _mm_srlv_epi16(a: __m128i, count: __m128i) -> __m128i { /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srlv_epi16&expand=5537) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvw))] pub unsafe fn _mm_mask_srlv_epi16( src: __m128i, @@ -5867,6 +6348,7 @@ pub unsafe fn _mm_mask_srlv_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srlv_epi16&expand=5538) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvw))] pub unsafe fn _mm_maskz_srlv_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_srlv_epi16(a, count).as_i16x8(); @@ -5879,6 +6361,7 @@ pub unsafe fn _mm_maskz_srlv_epi16(k: __mmask8, a: __m128i, count: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sra_epi16&expand=5398) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraw))] pub unsafe fn _mm512_sra_epi16(a: __m512i, count: __m128i) -> __m512i { transmute(vpsraw(a.as_i16x32(), count.as_i16x8())) @@ -5889,6 +6372,7 @@ pub unsafe fn _mm512_sra_epi16(a: __m512i, count: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sra_epi16&expand=5396) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraw))] pub unsafe fn _mm512_mask_sra_epi16( src: __m512i, @@ -5905,6 +6389,7 @@ pub unsafe fn _mm512_mask_sra_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sra_epi16&expand=5397) 
#[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraw))] pub unsafe fn _mm512_maskz_sra_epi16(k: __mmask32, a: __m512i, count: __m128i) -> __m512i { let shf = _mm512_sra_epi16(a, count).as_i16x32(); @@ -5917,6 +6402,7 @@ pub unsafe fn _mm512_maskz_sra_epi16(k: __mmask32, a: __m512i, count: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sra_epi16&expand=5393) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraw))] pub unsafe fn _mm256_mask_sra_epi16( src: __m256i, @@ -5933,6 +6419,7 @@ pub unsafe fn _mm256_mask_sra_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sra_epi16&expand=5394) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraw))] pub unsafe fn _mm256_maskz_sra_epi16(k: __mmask16, a: __m256i, count: __m128i) -> __m256i { let shf = _mm256_sra_epi16(a, count).as_i16x16(); @@ -5945,6 +6432,7 @@ pub unsafe fn _mm256_maskz_sra_epi16(k: __mmask16, a: __m256i, count: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sra_epi16&expand=5390) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraw))] pub unsafe fn _mm_mask_sra_epi16(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_sra_epi16(a, count).as_i16x8(); @@ -5956,6 +6444,7 @@ pub unsafe fn _mm_mask_sra_epi16(src: __m128i, k: __mmask8, a: __m128i, count: _ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sra_epi16&expand=5391) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraw))] pub unsafe fn _mm_maskz_sra_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_sra_epi16(a, count).as_i16x8(); @@ -5968,6 +6457,7 @@ pub unsafe fn _mm_maskz_sra_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srai_epi16&expand=5427) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_srai_epi16(a: __m512i) -> __m512i { @@ -5980,6 +6470,7 @@ pub unsafe fn _mm512_srai_epi16(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srai_epi16&expand=5425) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_srai_epi16( @@ -5997,6 +6488,7 @@ pub unsafe fn _mm512_mask_srai_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srai_epi16&expand=5426) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_srai_epi16(k: __mmask32, a: __m512i) -> __m512i { @@ -6011,6 +6503,7 @@ pub unsafe fn _mm512_maskz_srai_epi16(k: __mmask32, a: __m512i) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srai_epi16&expand=5422) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_srai_epi16( @@ -6028,6 +6521,7 @@ pub unsafe fn _mm256_mask_srai_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srai_epi16&expand=5423) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_srai_epi16(k: __mmask16, a: __m256i) -> __m256i { @@ -6042,6 +6536,7 @@ pub unsafe fn _mm256_maskz_srai_epi16(k: __mmask16, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srai_epi16&expand=5419) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_srai_epi16( @@ -6059,6 +6554,7 @@ pub unsafe fn _mm_mask_srai_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srai_epi16&expand=5420) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_srai_epi16(k: __mmask8, a: __m128i) -> __m128i { @@ -6073,6 +6569,7 @@ pub unsafe fn _mm_maskz_srai_epi16(k: __mmask8, a: __m128i) -> /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srav_epi16&expand=5456) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravw))] pub unsafe fn _mm512_srav_epi16(a: __m512i, count: __m512i) -> __m512i { transmute(vpsravw(a.as_i16x32(), count.as_i16x32())) @@ -6083,6 +6580,7 @@ pub unsafe fn _mm512_srav_epi16(a: __m512i, count: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srav_epi16&expand=5454) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravw))] pub unsafe fn _mm512_mask_srav_epi16( src: __m512i, @@ -6099,6 +6597,7 @@ pub unsafe fn _mm512_mask_srav_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srav_epi16&expand=5455) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravw))] pub unsafe fn _mm512_maskz_srav_epi16(k: __mmask32, a: __m512i, count: __m512i) -> __m512i { let shf = _mm512_srav_epi16(a, count).as_i16x32(); @@ -6111,6 +6610,7 @@ pub unsafe fn _mm512_maskz_srav_epi16(k: __mmask32, a: __m512i, count: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srav_epi16&expand=5453) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravw))] pub unsafe fn _mm256_srav_epi16(a: __m256i, count: __m256i) -> __m256i { transmute(vpsravw256(a.as_i16x16(), count.as_i16x16())) @@ -6121,6 +6621,7 @@ pub unsafe fn _mm256_srav_epi16(a: __m256i, count: __m256i) -> __m256i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srav_epi16&expand=5451) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravw))] pub unsafe fn _mm256_mask_srav_epi16( src: __m256i, @@ -6137,6 +6638,7 @@ pub unsafe fn _mm256_mask_srav_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srav_epi16&expand=5452) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravw))] pub unsafe fn _mm256_maskz_srav_epi16(k: __mmask16, a: __m256i, count: __m256i) -> __m256i { let shf = _mm256_srav_epi16(a, count).as_i16x16(); @@ -6149,6 +6651,7 @@ pub unsafe fn _mm256_maskz_srav_epi16(k: __mmask16, a: __m256i, count: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srav_epi16&expand=5450) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravw))] pub unsafe fn _mm_srav_epi16(a: __m128i, count: __m128i) -> __m128i { transmute(vpsravw128(a.as_i16x8(), count.as_i16x8())) @@ -6159,6 +6662,7 @@ pub unsafe fn _mm_srav_epi16(a: __m128i, count: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srav_epi16&expand=5448) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravw))] pub unsafe fn _mm_mask_srav_epi16( src: __m128i, @@ -6175,6 +6679,7 @@ pub unsafe fn _mm_mask_srav_epi16( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srav_epi16&expand=5449) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravw))] pub unsafe fn _mm_maskz_srav_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_srav_epi16(a, count).as_i16x8(); @@ -6187,6 +6692,7 @@ pub unsafe fn _mm_maskz_srav_epi16(k: __mmask8, a: __m128i, count: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutex2var_epi16&expand=4226) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2w or vpermt2w pub unsafe fn _mm512_permutex2var_epi16(a: __m512i, idx: __m512i, b: __m512i) -> __m512i { transmute(vpermi2w(a.as_i16x32(), idx.as_i16x32(), b.as_i16x32())) @@ -6197,6 +6703,7 @@ pub unsafe fn _mm512_permutex2var_epi16(a: __m512i, idx: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutex2var_epi16&expand=4223) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2w))] pub unsafe fn _mm512_mask_permutex2var_epi16( a: __m512i, @@ -6213,6 +6720,7 @@ pub unsafe fn _mm512_mask_permutex2var_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutex2var_epi16&expand=4225) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2w or vpermt2w pub unsafe fn _mm512_maskz_permutex2var_epi16( k: __mmask32, @@ -6230,6 +6738,7 @@ pub unsafe fn _mm512_maskz_permutex2var_epi16( /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask2_permutex2var_epi16&expand=4224) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermi2w))] pub unsafe fn _mm512_mask2_permutex2var_epi16( a: __m512i, @@ -6246,6 +6755,7 @@ pub unsafe fn _mm512_mask2_permutex2var_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutex2var_epi16&expand=4222) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2w or vpermt2w pub unsafe fn _mm256_permutex2var_epi16(a: __m256i, idx: __m256i, b: __m256i) -> __m256i { transmute(vpermi2w256(a.as_i16x16(), idx.as_i16x16(), b.as_i16x16())) @@ -6256,6 +6766,7 @@ pub unsafe fn _mm256_permutex2var_epi16(a: __m256i, idx: __m256i, b: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutex2var_epi16&expand=4219) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2w))] pub unsafe fn _mm256_mask_permutex2var_epi16( a: __m256i, @@ -6272,6 +6783,7 @@ pub unsafe fn _mm256_mask_permutex2var_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutex2var_epi16&expand=4221) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2w or vpermt2w pub unsafe fn _mm256_maskz_permutex2var_epi16( k: __mmask16, @@ -6289,6 +6801,7 @@ pub unsafe fn _mm256_maskz_permutex2var_epi16( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask2_permutex2var_epi16&expand=4220) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermi2w))] pub unsafe fn _mm256_mask2_permutex2var_epi16( a: __m256i, @@ -6305,6 +6818,7 @@ pub unsafe fn _mm256_mask2_permutex2var_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutex2var_epi16&expand=4218) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2w or vpermt2w pub unsafe fn _mm_permutex2var_epi16(a: __m128i, idx: __m128i, b: __m128i) -> __m128i { transmute(vpermi2w128(a.as_i16x8(), idx.as_i16x8(), b.as_i16x8())) @@ -6315,6 +6829,7 @@ pub unsafe fn _mm_permutex2var_epi16(a: __m128i, idx: __m128i, b: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutex2var_epi16&expand=4215) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2w))] pub unsafe fn _mm_mask_permutex2var_epi16( a: __m128i, @@ -6331,6 +6846,7 @@ pub unsafe fn _mm_mask_permutex2var_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutex2var_epi16&expand=4217) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2w or vpermt2w pub unsafe fn _mm_maskz_permutex2var_epi16( k: __mmask8, @@ -6348,6 +6864,7 @@ pub unsafe fn _mm_maskz_permutex2var_epi16( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask2_permutex2var_epi16&expand=4216) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermi2w))] pub unsafe fn _mm_mask2_permutex2var_epi16( a: __m128i, @@ -6364,6 +6881,7 @@ pub unsafe fn _mm_mask2_permutex2var_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutexvar_epi16&expand=4295) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermw))] pub unsafe fn _mm512_permutexvar_epi16(idx: __m512i, a: __m512i) -> __m512i { transmute(vpermw(a.as_i16x32(), idx.as_i16x32())) @@ -6374,6 +6892,7 @@ pub unsafe fn _mm512_permutexvar_epi16(idx: __m512i, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutexvar_epi16&expand=4293) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermw))] pub unsafe fn _mm512_mask_permutexvar_epi16( src: __m512i, @@ -6390,6 +6909,7 @@ pub unsafe fn _mm512_mask_permutexvar_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutexvar_epi16&expand=4294) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermw))] pub unsafe fn _mm512_maskz_permutexvar_epi16(k: __mmask32, idx: __m512i, a: __m512i) -> __m512i { let permute = _mm512_permutexvar_epi16(idx, a).as_i16x32(); @@ -6402,6 +6922,7 @@ pub unsafe fn _mm512_maskz_permutexvar_epi16(k: __mmask32, idx: __m512i, a: __m5 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutexvar_epi16&expand=4292) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermw))] pub unsafe fn _mm256_permutexvar_epi16(idx: __m256i, a: __m256i) -> __m256i { transmute(vpermw256(a.as_i16x16(), idx.as_i16x16())) @@ -6412,6 +6933,7 @@ pub unsafe fn _mm256_permutexvar_epi16(idx: __m256i, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutexvar_epi16&expand=4290) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermw))] pub unsafe fn _mm256_mask_permutexvar_epi16( src: __m256i, @@ -6428,6 +6950,7 @@ pub unsafe fn _mm256_mask_permutexvar_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutexvar_epi16&expand=4291) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermw))] pub unsafe fn _mm256_maskz_permutexvar_epi16(k: __mmask16, idx: __m256i, a: __m256i) -> __m256i { let permute = _mm256_permutexvar_epi16(idx, a).as_i16x16(); @@ -6440,6 +6963,7 @@ pub unsafe fn _mm256_maskz_permutexvar_epi16(k: __mmask16, idx: __m256i, a: __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutexvar_epi16&expand=4289) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermw))] pub unsafe fn _mm_permutexvar_epi16(idx: __m128i, a: __m128i) -> __m128i { transmute(vpermw128(a.as_i16x8(), idx.as_i16x8())) @@ -6450,6 +6974,7 @@ pub unsafe fn 
_mm_permutexvar_epi16(idx: __m128i, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutexvar_epi16&expand=4287) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermw))] pub unsafe fn _mm_mask_permutexvar_epi16( src: __m128i, @@ -6466,6 +6991,7 @@ pub unsafe fn _mm_mask_permutexvar_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutexvar_epi16&expand=4288) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermw))] pub unsafe fn _mm_maskz_permutexvar_epi16(k: __mmask8, idx: __m128i, a: __m128i) -> __m128i { let permute = _mm_permutexvar_epi16(idx, a).as_i16x8(); @@ -6478,6 +7004,7 @@ pub unsafe fn _mm_maskz_permutexvar_epi16(k: __mmask8, idx: __m128i, a: __m128i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_blend_epi16&expand=430) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu16))] //should be vpblendmw pub unsafe fn _mm512_mask_blend_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { transmute(simd_select_bitmask(k, b.as_i16x32(), a.as_i16x32())) @@ -6488,6 +7015,7 @@ pub unsafe fn _mm512_mask_blend_epi16(k: __mmask32, a: __m512i, b: __m512i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_blend_epi16&expand=429) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu16))] //should be vpblendmw pub unsafe fn _mm256_mask_blend_epi16(k: 
__mmask16, a: __m256i, b: __m256i) -> __m256i { transmute(simd_select_bitmask(k, b.as_i16x16(), a.as_i16x16())) @@ -6498,6 +7026,7 @@ pub unsafe fn _mm256_mask_blend_epi16(k: __mmask16, a: __m256i, b: __m256i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_blend_epi16&expand=427) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu16))] //should be vpblendmw pub unsafe fn _mm_mask_blend_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { transmute(simd_select_bitmask(k, b.as_i16x8(), a.as_i16x8())) @@ -6508,6 +7037,7 @@ pub unsafe fn _mm_mask_blend_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_blend_epi8&expand=441) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu8))] //should be vpblendmb pub unsafe fn _mm512_mask_blend_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { transmute(simd_select_bitmask(k, b.as_i8x64(), a.as_i8x64())) @@ -6518,6 +7048,7 @@ pub unsafe fn _mm512_mask_blend_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_blend_epi8&expand=440) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu8))] //should be vpblendmb pub unsafe fn _mm256_mask_blend_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { transmute(simd_select_bitmask(k, b.as_i8x32(), a.as_i8x32())) @@ -6528,6 +7059,7 @@ pub unsafe fn _mm256_mask_blend_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_blend_epi8&expand=439) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu8))] //should be vpblendmb pub unsafe fn _mm_mask_blend_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { transmute(simd_select_bitmask(k, b.as_i8x16(), a.as_i8x16())) @@ -6538,6 +7070,7 @@ pub unsafe fn _mm_mask_blend_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastw_epi16&expand=587) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastw))] pub unsafe fn _mm512_broadcastw_epi16(a: __m128i) -> __m512i { let a = _mm512_castsi128_si512(a).as_i16x32(); @@ -6557,6 +7090,7 @@ pub unsafe fn _mm512_broadcastw_epi16(a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcastw_epi16&expand=588) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastw))] pub unsafe fn _mm512_mask_broadcastw_epi16(src: __m512i, k: __mmask32, a: __m128i) -> __m512i { let broadcast = _mm512_broadcastw_epi16(a).as_i16x32(); @@ -6568,6 +7102,7 @@ pub unsafe fn _mm512_mask_broadcastw_epi16(src: __m512i, k: __mmask32, a: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcastw_epi16&expand=589) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastw))] pub unsafe fn _mm512_maskz_broadcastw_epi16(k: __mmask32, a: __m128i) -> 
__m512i { let broadcast = _mm512_broadcastw_epi16(a).as_i16x32(); @@ -6580,6 +7115,7 @@ pub unsafe fn _mm512_maskz_broadcastw_epi16(k: __mmask32, a: __m128i) -> __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_broadcastw_epi16&expand=585) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastw))] pub unsafe fn _mm256_mask_broadcastw_epi16(src: __m256i, k: __mmask16, a: __m128i) -> __m256i { let broadcast = _mm256_broadcastw_epi16(a).as_i16x16(); @@ -6591,6 +7127,7 @@ pub unsafe fn _mm256_mask_broadcastw_epi16(src: __m256i, k: __mmask16, a: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_broadcastw_epi16&expand=586) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastw))] pub unsafe fn _mm256_maskz_broadcastw_epi16(k: __mmask16, a: __m128i) -> __m256i { let broadcast = _mm256_broadcastw_epi16(a).as_i16x16(); @@ -6603,6 +7140,7 @@ pub unsafe fn _mm256_maskz_broadcastw_epi16(k: __mmask16, a: __m128i) -> __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_broadcastw_epi16&expand=582) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastw))] pub unsafe fn _mm_mask_broadcastw_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let broadcast = _mm_broadcastw_epi16(a).as_i16x8(); @@ -6614,6 +7152,7 @@ pub unsafe fn _mm_mask_broadcastw_epi16(src: __m128i, k: __mmask8, a: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_broadcastw_epi16&expand=583) 
#[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastw))] pub unsafe fn _mm_maskz_broadcastw_epi16(k: __mmask8, a: __m128i) -> __m128i { let broadcast = _mm_broadcastw_epi16(a).as_i16x8(); @@ -6626,6 +7165,7 @@ pub unsafe fn _mm_maskz_broadcastw_epi16(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastb_epi8&expand=536) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastb))] pub unsafe fn _mm512_broadcastb_epi8(a: __m128i) -> __m512i { let a = _mm512_castsi128_si512(a).as_i8x64(); @@ -6646,6 +7186,7 @@ pub unsafe fn _mm512_broadcastb_epi8(a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcastb_epi8&expand=537) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastb))] pub unsafe fn _mm512_mask_broadcastb_epi8(src: __m512i, k: __mmask64, a: __m128i) -> __m512i { let broadcast = _mm512_broadcastb_epi8(a).as_i8x64(); @@ -6657,6 +7198,7 @@ pub unsafe fn _mm512_mask_broadcastb_epi8(src: __m512i, k: __mmask64, a: __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcastb_epi8&expand=538) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastb))] pub unsafe fn _mm512_maskz_broadcastb_epi8(k: __mmask64, a: __m128i) -> __m512i { let broadcast = _mm512_broadcastb_epi8(a).as_i8x64(); @@ -6669,6 +7211,7 @@ pub unsafe fn _mm512_maskz_broadcastb_epi8(k: __mmask64, a: __m128i) -> __m512i /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_broadcastb_epi8&expand=534) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastb))] pub unsafe fn _mm256_mask_broadcastb_epi8(src: __m256i, k: __mmask32, a: __m128i) -> __m256i { let broadcast = _mm256_broadcastb_epi8(a).as_i8x32(); @@ -6680,6 +7223,7 @@ pub unsafe fn _mm256_mask_broadcastb_epi8(src: __m256i, k: __mmask32, a: __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_broadcastb_epi8&expand=535) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastb))] pub unsafe fn _mm256_maskz_broadcastb_epi8(k: __mmask32, a: __m128i) -> __m256i { let broadcast = _mm256_broadcastb_epi8(a).as_i8x32(); @@ -6692,6 +7236,7 @@ pub unsafe fn _mm256_maskz_broadcastb_epi8(k: __mmask32, a: __m128i) -> __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_broadcastb_epi8&expand=531) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastb))] pub unsafe fn _mm_mask_broadcastb_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m128i { let broadcast = _mm_broadcastb_epi8(a).as_i8x16(); @@ -6703,6 +7248,7 @@ pub unsafe fn _mm_mask_broadcastb_epi8(src: __m128i, k: __mmask16, a: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_broadcastb_epi8&expand=532) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastb))] pub unsafe fn 
_mm_maskz_broadcastb_epi8(k: __mmask16, a: __m128i) -> __m128i { let broadcast = _mm_broadcastb_epi8(a).as_i8x16(); @@ -6715,6 +7261,7 @@ pub unsafe fn _mm_maskz_broadcastb_epi8(k: __mmask16, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpackhi_epi16&expand=6012) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhwd))] pub unsafe fn _mm512_unpackhi_epi16(a: __m512i, b: __m512i) -> __m512i { let a = a.as_i16x32(); @@ -6742,6 +7289,7 @@ pub unsafe fn _mm512_unpackhi_epi16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpackhi_epi16&expand=6010) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhwd))] pub unsafe fn _mm512_mask_unpackhi_epi16( src: __m512i, @@ -6758,6 +7306,7 @@ pub unsafe fn _mm512_mask_unpackhi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpackhi_epi16&expand=6011) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhwd))] pub unsafe fn _mm512_maskz_unpackhi_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let unpackhi = _mm512_unpackhi_epi16(a, b).as_i16x32(); @@ -6770,6 +7319,7 @@ pub unsafe fn _mm512_maskz_unpackhi_epi16(k: __mmask32, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpackhi_epi16&expand=6007) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhwd))] pub 
unsafe fn _mm256_mask_unpackhi_epi16( src: __m256i, @@ -6786,6 +7336,7 @@ pub unsafe fn _mm256_mask_unpackhi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpackhi_epi16&expand=6008) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhwd))] pub unsafe fn _mm256_maskz_unpackhi_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let unpackhi = _mm256_unpackhi_epi16(a, b).as_i16x16(); @@ -6798,6 +7349,7 @@ pub unsafe fn _mm256_maskz_unpackhi_epi16(k: __mmask16, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpackhi_epi16&expand=6004) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhwd))] pub unsafe fn _mm_mask_unpackhi_epi16( src: __m128i, @@ -6814,6 +7366,7 @@ pub unsafe fn _mm_mask_unpackhi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpackhi_epi16&expand=6005) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhwd))] pub unsafe fn _mm_maskz_unpackhi_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let unpackhi = _mm_unpackhi_epi16(a, b).as_i16x8(); @@ -6826,6 +7379,7 @@ pub unsafe fn _mm_maskz_unpackhi_epi16(k: __mmask8, a: __m128i, b: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpackhi_epi8&expand=6039) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhbw))] pub unsafe fn _mm512_unpackhi_epi8(a: __m512i, b: 
__m512i) -> __m512i { let a = a.as_i8x64(); @@ -6861,6 +7415,7 @@ pub unsafe fn _mm512_unpackhi_epi8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpackhi_epi8&expand=6037) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhbw))] pub unsafe fn _mm512_mask_unpackhi_epi8( src: __m512i, @@ -6877,6 +7432,7 @@ pub unsafe fn _mm512_mask_unpackhi_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpackhi_epi8&expand=6038) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhbw))] pub unsafe fn _mm512_maskz_unpackhi_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let unpackhi = _mm512_unpackhi_epi8(a, b).as_i8x64(); @@ -6889,6 +7445,7 @@ pub unsafe fn _mm512_maskz_unpackhi_epi8(k: __mmask64, a: __m512i, b: __m512i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpackhi_epi8&expand=6034) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhbw))] pub unsafe fn _mm256_mask_unpackhi_epi8( src: __m256i, @@ -6905,6 +7462,7 @@ pub unsafe fn _mm256_mask_unpackhi_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpackhi_epi8&expand=6035) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhbw))] pub unsafe fn _mm256_maskz_unpackhi_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let unpackhi = _mm256_unpackhi_epi8(a, b).as_i8x32(); @@ 
-6917,6 +7475,7 @@ pub unsafe fn _mm256_maskz_unpackhi_epi8(k: __mmask32, a: __m256i, b: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpackhi_epi8&expand=6031) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhbw))] pub unsafe fn _mm_mask_unpackhi_epi8( src: __m128i, @@ -6933,6 +7492,7 @@ pub unsafe fn _mm_mask_unpackhi_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpackhi_epi8&expand=6032) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhbw))] pub unsafe fn _mm_maskz_unpackhi_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let unpackhi = _mm_unpackhi_epi8(a, b).as_i8x16(); @@ -6945,6 +7505,7 @@ pub unsafe fn _mm_maskz_unpackhi_epi8(k: __mmask16, a: __m128i, b: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpacklo_epi16&expand=6069) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklwd))] pub unsafe fn _mm512_unpacklo_epi16(a: __m512i, b: __m512i) -> __m512i { let a = a.as_i16x32(); @@ -6972,6 +7533,7 @@ pub unsafe fn _mm512_unpacklo_epi16(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpacklo_epi16&expand=6067) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklwd))] pub unsafe fn _mm512_mask_unpacklo_epi16( src: __m512i, @@ -6988,6 +7550,7 @@ pub unsafe fn _mm512_mask_unpacklo_epi16( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpacklo_epi16&expand=6068) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklwd))] pub unsafe fn _mm512_maskz_unpacklo_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __m512i { let unpacklo = _mm512_unpacklo_epi16(a, b).as_i16x32(); @@ -7000,6 +7563,7 @@ pub unsafe fn _mm512_maskz_unpacklo_epi16(k: __mmask32, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpacklo_epi16&expand=6064) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklwd))] pub unsafe fn _mm256_mask_unpacklo_epi16( src: __m256i, @@ -7016,6 +7580,7 @@ pub unsafe fn _mm256_mask_unpacklo_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpacklo_epi16&expand=6065) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklwd))] pub unsafe fn _mm256_maskz_unpacklo_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __m256i { let unpacklo = _mm256_unpacklo_epi16(a, b).as_i16x16(); @@ -7028,6 +7593,7 @@ pub unsafe fn _mm256_maskz_unpacklo_epi16(k: __mmask16, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpacklo_epi16&expand=6061) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklwd))] pub unsafe fn _mm_mask_unpacklo_epi16( src: __m128i, @@ -7044,6 +7610,7 @@ pub unsafe fn _mm_mask_unpacklo_epi16( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpacklo_epi16&expand=6062) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklwd))] pub unsafe fn _mm_maskz_unpacklo_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let unpacklo = _mm_unpacklo_epi16(a, b).as_i16x8(); @@ -7056,6 +7623,7 @@ pub unsafe fn _mm_maskz_unpacklo_epi16(k: __mmask8, a: __m128i, b: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpacklo_epi8&expand=6096) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklbw))] pub unsafe fn _mm512_unpacklo_epi8(a: __m512i, b: __m512i) -> __m512i { let a = a.as_i8x64(); @@ -7091,6 +7659,7 @@ pub unsafe fn _mm512_unpacklo_epi8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpacklo_epi8&expand=6094) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklbw))] pub unsafe fn _mm512_mask_unpacklo_epi8( src: __m512i, @@ -7107,6 +7676,7 @@ pub unsafe fn _mm512_mask_unpacklo_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpacklo_epi8&expand=6095) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklbw))] pub unsafe fn _mm512_maskz_unpacklo_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let unpacklo = _mm512_unpacklo_epi8(a, b).as_i8x64(); @@ -7119,6 +7689,7 @@ pub unsafe fn _mm512_maskz_unpacklo_epi8(k: __mmask64, a: __m512i, b: __m512i) - /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpacklo_epi8&expand=6091) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklbw))] pub unsafe fn _mm256_mask_unpacklo_epi8( src: __m256i, @@ -7135,6 +7706,7 @@ pub unsafe fn _mm256_mask_unpacklo_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpacklo_epi8&expand=6092) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklbw))] pub unsafe fn _mm256_maskz_unpacklo_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let unpacklo = _mm256_unpacklo_epi8(a, b).as_i8x32(); @@ -7147,6 +7719,7 @@ pub unsafe fn _mm256_maskz_unpacklo_epi8(k: __mmask32, a: __m256i, b: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpacklo_epi8&expand=6088) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklbw))] pub unsafe fn _mm_mask_unpacklo_epi8( src: __m128i, @@ -7163,6 +7736,7 @@ pub unsafe fn _mm_mask_unpacklo_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpacklo_epi8&expand=6089) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklbw))] pub unsafe fn _mm_maskz_unpacklo_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let unpacklo = _mm_unpacklo_epi8(a, b).as_i8x16(); @@ -7175,6 +7749,7 @@ pub unsafe fn _mm_maskz_unpacklo_epi8(k: __mmask16, a: __m128i, b: __m128i) -> _ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mov_epi16&expand=3795) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu16))] pub unsafe fn _mm512_mask_mov_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m512i { let mov = a.as_i16x32(); @@ -7186,6 +7761,7 @@ pub unsafe fn _mm512_mask_mov_epi16(src: __m512i, k: __mmask32, a: __m512i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mov_epi16&expand=3796) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu16))] pub unsafe fn _mm512_maskz_mov_epi16(k: __mmask32, a: __m512i) -> __m512i { let mov = a.as_i16x32(); @@ -7198,6 +7774,7 @@ pub unsafe fn _mm512_maskz_mov_epi16(k: __mmask32, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mov_epi16&expand=3793) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu16))] pub unsafe fn _mm256_mask_mov_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i { let mov = a.as_i16x16(); @@ -7209,6 +7786,7 @@ pub unsafe fn _mm256_mask_mov_epi16(src: __m256i, k: __mmask16, a: __m256i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mov_epi16&expand=3794) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu16))] pub unsafe fn _mm256_maskz_mov_epi16(k: __mmask16, a: __m256i) -> __m256i { let mov = a.as_i16x16(); @@ -7221,6 +7799,7 @@ pub unsafe fn _mm256_maskz_mov_epi16(k: __mmask16, 
a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mov_epi16&expand=3791) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu16))] pub unsafe fn _mm_mask_mov_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let mov = a.as_i16x8(); @@ -7232,6 +7811,7 @@ pub unsafe fn _mm_mask_mov_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mov_epi16&expand=3792) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu16))] pub unsafe fn _mm_maskz_mov_epi16(k: __mmask8, a: __m128i) -> __m128i { let mov = a.as_i16x8(); @@ -7244,6 +7824,7 @@ pub unsafe fn _mm_maskz_mov_epi16(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mov_epi8&expand=3813) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu8))] pub unsafe fn _mm512_mask_mov_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __m512i { let mov = a.as_i8x64(); @@ -7255,6 +7836,7 @@ pub unsafe fn _mm512_mask_mov_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mov_epi8&expand=3814) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu8))] pub unsafe fn _mm512_maskz_mov_epi8(k: __mmask64, a: __m512i) -> __m512i { let mov = a.as_i8x64(); @@ -7267,6 +7849,7 @@ pub unsafe fn _mm512_maskz_mov_epi8(k: 
__mmask64, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mov_epi8&expand=3811) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu8))] pub unsafe fn _mm256_mask_mov_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __m256i { let mov = a.as_i8x32(); @@ -7278,6 +7861,7 @@ pub unsafe fn _mm256_mask_mov_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mov_epi8&expand=3812) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu8))] pub unsafe fn _mm256_maskz_mov_epi8(k: __mmask32, a: __m256i) -> __m256i { let mov = a.as_i8x32(); @@ -7290,6 +7874,7 @@ pub unsafe fn _mm256_maskz_mov_epi8(k: __mmask32, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mov_epi8&expand=3809) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu8))] pub unsafe fn _mm_mask_mov_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m128i { let mov = a.as_i8x16(); @@ -7301,6 +7886,7 @@ pub unsafe fn _mm_mask_mov_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mov_epi8&expand=3810) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqu8))] pub unsafe fn _mm_maskz_mov_epi8(k: __mmask16, a: __m128i) -> __m128i { let mov = a.as_i8x16(); @@ -7313,6 +7899,7 @@ pub unsafe fn 
_mm_maskz_mov_epi8(k: __mmask16, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_set1_epi16&expand=4942) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastw))] pub unsafe fn _mm512_mask_set1_epi16(src: __m512i, k: __mmask32, a: i16) -> __m512i { let r = _mm512_set1_epi16(a).as_i16x32(); @@ -7324,6 +7911,7 @@ pub unsafe fn _mm512_mask_set1_epi16(src: __m512i, k: __mmask32, a: i16) -> __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_set1_epi16&expand=4943) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastw))] pub unsafe fn _mm512_maskz_set1_epi16(k: __mmask32, a: i16) -> __m512i { let r = _mm512_set1_epi16(a).as_i16x32(); @@ -7336,6 +7924,7 @@ pub unsafe fn _mm512_maskz_set1_epi16(k: __mmask32, a: i16) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_set1_epi16&expand=4939) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastw))] pub unsafe fn _mm256_mask_set1_epi16(src: __m256i, k: __mmask16, a: i16) -> __m256i { let r = _mm256_set1_epi16(a).as_i16x16(); @@ -7347,6 +7936,7 @@ pub unsafe fn _mm256_mask_set1_epi16(src: __m256i, k: __mmask16, a: i16) -> __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_set1_epi16&expand=4940) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastw))] pub unsafe fn _mm256_maskz_set1_epi16(k: 
__mmask16, a: i16) -> __m256i { let r = _mm256_set1_epi16(a).as_i16x16(); @@ -7359,6 +7949,7 @@ pub unsafe fn _mm256_maskz_set1_epi16(k: __mmask16, a: i16) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_set1_epi16&expand=4936) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastw))] pub unsafe fn _mm_mask_set1_epi16(src: __m128i, k: __mmask8, a: i16) -> __m128i { let r = _mm_set1_epi16(a).as_i16x8(); @@ -7370,6 +7961,7 @@ pub unsafe fn _mm_mask_set1_epi16(src: __m128i, k: __mmask8, a: i16) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_set1_epi16&expand=4937) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastw))] pub unsafe fn _mm_maskz_set1_epi16(k: __mmask8, a: i16) -> __m128i { let r = _mm_set1_epi16(a).as_i16x8(); @@ -7382,6 +7974,7 @@ pub unsafe fn _mm_maskz_set1_epi16(k: __mmask8, a: i16) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_set1_epi8&expand=4970) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] pub unsafe fn _mm512_mask_set1_epi8(src: __m512i, k: __mmask64, a: i8) -> __m512i { let r = _mm512_set1_epi8(a).as_i8x64(); @@ -7393,6 +7986,7 @@ pub unsafe fn _mm512_mask_set1_epi8(src: __m512i, k: __mmask64, a: i8) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_set1_epi8&expand=4971) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpbroadcast))] pub unsafe fn _mm512_maskz_set1_epi8(k: __mmask64, a: i8) -> __m512i { let r = _mm512_set1_epi8(a).as_i8x64(); @@ -7405,6 +7999,7 @@ pub unsafe fn _mm512_maskz_set1_epi8(k: __mmask64, a: i8) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_set1_epi8&expand=4967) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] pub unsafe fn _mm256_mask_set1_epi8(src: __m256i, k: __mmask32, a: i8) -> __m256i { let r = _mm256_set1_epi8(a).as_i8x32(); @@ -7416,6 +8011,7 @@ pub unsafe fn _mm256_mask_set1_epi8(src: __m256i, k: __mmask32, a: i8) -> __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_set1_epi8&expand=4968) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] pub unsafe fn _mm256_maskz_set1_epi8(k: __mmask32, a: i8) -> __m256i { let r = _mm256_set1_epi8(a).as_i8x32(); @@ -7428,6 +8024,7 @@ pub unsafe fn _mm256_maskz_set1_epi8(k: __mmask32, a: i8) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_set1_epi8&expand=4964) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] pub unsafe fn _mm_mask_set1_epi8(src: __m128i, k: __mmask16, a: i8) -> __m128i { let r = _mm_set1_epi8(a).as_i8x16(); @@ -7439,6 +8036,7 @@ pub unsafe fn _mm_mask_set1_epi8(src: __m128i, k: __mmask16, a: i8) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_set1_epi8&expand=4965) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] pub unsafe fn _mm_maskz_set1_epi8(k: __mmask16, a: i8) -> __m128i { let r = _mm_set1_epi8(a).as_i8x16(); @@ -7451,6 +8049,7 @@ pub unsafe fn _mm_maskz_set1_epi8(k: __mmask16, a: i8) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shufflelo_epi16&expand=5221) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 0))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_shufflelo_epi16(a: __m512i) -> __m512i { @@ -7502,6 +8101,7 @@ pub unsafe fn _mm512_shufflelo_epi16(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shufflelo_epi16&expand=5219) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_shufflelo_epi16( @@ -7519,6 +8119,7 @@ pub unsafe fn _mm512_mask_shufflelo_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shufflelo_epi16&expand=5220) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_shufflelo_epi16(k: __mmask32, a: __m512i) -> __m512i { @@ -7533,6 +8134,7 @@ pub unsafe fn _mm512_maskz_shufflelo_epi16(k: __mmask32, a: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shufflelo_epi16&expand=5216) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_shufflelo_epi16( @@ -7550,6 +8152,7 @@ pub unsafe fn _mm256_mask_shufflelo_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shufflelo_epi16&expand=5217) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_shufflelo_epi16(k: __mmask16, a: __m256i) -> __m256i { @@ -7564,6 +8167,7 @@ pub unsafe fn _mm256_maskz_shufflelo_epi16(k: __mmask16, a: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shufflelo_epi16&expand=5213) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_shufflelo_epi16( @@ -7581,6 +8185,7 @@ pub unsafe fn _mm_mask_shufflelo_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shufflelo_epi16&expand=5214) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_shufflelo_epi16(k: __mmask8, a: __m128i) -> __m128i { @@ -7595,6 +8200,7 @@ pub unsafe fn _mm_maskz_shufflelo_epi16(k: __mmask8, a: __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shufflehi_epi16&expand=5212) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpshufhw, IMM8 = 0))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_shufflehi_epi16(a: __m512i) -> __m512i { @@ -7646,6 +8252,7 @@ pub unsafe fn _mm512_shufflehi_epi16(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shufflehi_epi16&expand=5210) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_shufflehi_epi16( @@ -7663,6 +8270,7 @@ pub unsafe fn _mm512_mask_shufflehi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shufflehi_epi16&expand=5211) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_shufflehi_epi16(k: __mmask32, a: __m512i) -> __m512i { @@ -7677,6 +8285,7 @@ pub unsafe fn _mm512_maskz_shufflehi_epi16(k: __mmask32, a: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shufflehi_epi16&expand=5207) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_shufflehi_epi16( @@ -7694,6 +8303,7 @@ pub unsafe fn _mm256_mask_shufflehi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shufflehi_epi16&expand=5208) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] 
pub unsafe fn _mm256_maskz_shufflehi_epi16(k: __mmask16, a: __m256i) -> __m256i { @@ -7708,6 +8318,7 @@ pub unsafe fn _mm256_maskz_shufflehi_epi16(k: __mmask16, a: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shufflehi_epi16&expand=5204) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_shufflehi_epi16( @@ -7725,6 +8336,7 @@ pub unsafe fn _mm_mask_shufflehi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shufflehi_epi16&expand=5205) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_shufflehi_epi16(k: __mmask8, a: __m128i) -> __m128i { @@ -7739,6 +8351,7 @@ pub unsafe fn _mm_maskz_shufflehi_epi16(k: __mmask8, a: __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shuffle_epi8&expand=5159) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufb))] pub unsafe fn _mm512_shuffle_epi8(a: __m512i, b: __m512i) -> __m512i { transmute(vpshufb(a.as_i8x64(), b.as_i8x64())) @@ -7749,6 +8362,7 @@ pub unsafe fn _mm512_shuffle_epi8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shuffle_epi8&expand=5157) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufb))] pub unsafe fn _mm512_mask_shuffle_epi8( src: __m512i, @@ 
-7765,6 +8379,7 @@ pub unsafe fn _mm512_mask_shuffle_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shuffle_epi8&expand=5158) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufb))] pub unsafe fn _mm512_maskz_shuffle_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let shuffle = _mm512_shuffle_epi8(a, b).as_i8x64(); @@ -7777,6 +8392,7 @@ pub unsafe fn _mm512_maskz_shuffle_epi8(k: __mmask64, a: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shuffle_epi8&expand=5154) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufb))] pub unsafe fn _mm256_mask_shuffle_epi8( src: __m256i, @@ -7793,6 +8409,7 @@ pub unsafe fn _mm256_mask_shuffle_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shuffle_epi8&expand=5155) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufb))] pub unsafe fn _mm256_maskz_shuffle_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let shuffle = _mm256_shuffle_epi8(a, b).as_i8x32(); @@ -7805,6 +8422,7 @@ pub unsafe fn _mm256_maskz_shuffle_epi8(k: __mmask32, a: __m256i, b: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shuffle_epi8&expand=5151) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufb))] pub unsafe fn _mm_mask_shuffle_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let shuffle = 
_mm_shuffle_epi8(a, b).as_i8x16(); @@ -7816,6 +8434,7 @@ pub unsafe fn _mm_mask_shuffle_epi8(src: __m128i, k: __mmask16, a: __m128i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shuffle_epi8&expand=5152) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufb))] pub unsafe fn _mm_maskz_shuffle_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let shuffle = _mm_shuffle_epi8(a, b).as_i8x16(); @@ -7828,6 +8447,7 @@ pub unsafe fn _mm_maskz_shuffle_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_test_epi16_mask&expand=5884) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmw))] pub unsafe fn _mm512_test_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { let and = _mm512_and_si512(a, b); @@ -7840,6 +8460,7 @@ pub unsafe fn _mm512_test_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_test_epi16_mask&expand=5883) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmw))] pub unsafe fn _mm512_mask_test_epi16_mask(k: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { let and = _mm512_and_si512(a, b); @@ -7852,6 +8473,7 @@ pub unsafe fn _mm512_mask_test_epi16_mask(k: __mmask32, a: __m512i, b: __m512i) // [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_test_epi16_mask&expand=5882) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vptestmw))] pub unsafe fn _mm256_test_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { let and = _mm256_and_si256(a, b); @@ -7864,6 +8486,7 @@ pub unsafe fn _mm256_test_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_test_epi16_mask&expand=5881) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmw))] pub unsafe fn _mm256_mask_test_epi16_mask(k: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { let and = _mm256_and_si256(a, b); @@ -7876,6 +8499,7 @@ pub unsafe fn _mm256_mask_test_epi16_mask(k: __mmask16, a: __m256i, b: __m256i) // [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_epi16_mask&expand=5880) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmw))] pub unsafe fn _mm_test_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); @@ -7888,6 +8512,7 @@ pub unsafe fn _mm_test_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_test_epi16_mask&expand=5879) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmw))] pub unsafe fn _mm_mask_test_epi16_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); @@ -7900,6 +8525,7 @@ pub unsafe fn _mm_mask_test_epi16_mask(k: __mmask8, a: __m128i, b: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_test_epi8_mask&expand=5902) #[inline] #[target_feature(enable = "avx512bw")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmb))] pub unsafe fn _mm512_test_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { let and = _mm512_and_si512(a, b); @@ -7912,6 +8538,7 @@ pub unsafe fn _mm512_test_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_test_epi8_mask&expand=5901) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmb))] pub unsafe fn _mm512_mask_test_epi8_mask(k: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { let and = _mm512_and_si512(a, b); @@ -7924,6 +8551,7 @@ pub unsafe fn _mm512_mask_test_epi8_mask(k: __mmask64, a: __m512i, b: __m512i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_test_epi8_mask&expand=5900) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmb))] pub unsafe fn _mm256_test_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { let and = _mm256_and_si256(a, b); @@ -7936,6 +8564,7 @@ pub unsafe fn _mm256_test_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_test_epi8_mask&expand=5899) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmb))] pub unsafe fn _mm256_mask_test_epi8_mask(k: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { let and = _mm256_and_si256(a, b); @@ -7948,6 +8577,7 @@ pub unsafe fn _mm256_mask_test_epi8_mask(k: __mmask32, a: __m256i, b: __m256i) - /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_epi8_mask&expand=5898) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmb))] pub unsafe fn _mm_test_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { let and = _mm_and_si128(a, b); @@ -7960,6 +8590,7 @@ pub unsafe fn _mm_test_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_test_epi8_mask&expand=5897) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmb))] pub unsafe fn _mm_mask_test_epi8_mask(k: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { let and = _mm_and_si128(a, b); @@ -7972,6 +8603,7 @@ pub unsafe fn _mm_mask_test_epi8_mask(k: __mmask16, a: __m128i, b: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_testn_epi16_mask&expand=5915) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmw))] pub unsafe fn _mm512_testn_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { let and = _mm512_and_si512(a, b); @@ -7984,6 +8616,7 @@ pub unsafe fn _mm512_testn_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_testn_epi16&expand=5914) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmw))] pub unsafe fn _mm512_mask_testn_epi16_mask(k: __mmask32, a: __m512i, b: __m512i) -> __mmask32 { let and = _mm512_and_si512(a, b); @@ -7996,6 +8629,7 @@ pub unsafe fn 
_mm512_mask_testn_epi16_mask(k: __mmask32, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testn_epi16_mask&expand=5913) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmw))] pub unsafe fn _mm256_testn_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { let and = _mm256_and_si256(a, b); @@ -8008,6 +8642,7 @@ pub unsafe fn _mm256_testn_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_testn_epi16_mask&expand=5912) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmw))] pub unsafe fn _mm256_mask_testn_epi16_mask(k: __mmask16, a: __m256i, b: __m256i) -> __mmask16 { let and = _mm256_and_si256(a, b); @@ -8020,6 +8655,7 @@ pub unsafe fn _mm256_mask_testn_epi16_mask(k: __mmask16, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testn_epi16_mask&expand=5911) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmw))] pub unsafe fn _mm_testn_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); @@ -8032,6 +8668,7 @@ pub unsafe fn _mm_testn_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_testn_epi16_mask&expand=5910) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmw))] pub unsafe fn _mm_mask_testn_epi16_mask(k: __mmask8, a: 
__m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); @@ -8044,6 +8681,7 @@ pub unsafe fn _mm_mask_testn_epi16_mask(k: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_testn_epi8_mask&expand=5933) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmb))] pub unsafe fn _mm512_testn_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { let and = _mm512_and_si512(a, b); @@ -8056,6 +8694,7 @@ pub unsafe fn _mm512_testn_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_testn_epi8_mask&expand=5932) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmb))] pub unsafe fn _mm512_mask_testn_epi8_mask(k: __mmask64, a: __m512i, b: __m512i) -> __mmask64 { let and = _mm512_and_si512(a, b); @@ -8068,6 +8707,7 @@ pub unsafe fn _mm512_mask_testn_epi8_mask(k: __mmask64, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testn_epi8_mask&expand=5931) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmb))] pub unsafe fn _mm256_testn_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { let and = _mm256_and_si256(a, b); @@ -8080,6 +8720,7 @@ pub unsafe fn _mm256_testn_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_testn_epi8_mask&expand=5930) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vptestnmb))] pub unsafe fn _mm256_mask_testn_epi8_mask(k: __mmask32, a: __m256i, b: __m256i) -> __mmask32 { let and = _mm256_and_si256(a, b); @@ -8092,6 +8733,7 @@ pub unsafe fn _mm256_mask_testn_epi8_mask(k: __mmask32, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testn_epi8_mask&expand=5929) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmb))] pub unsafe fn _mm_testn_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { let and = _mm_and_si128(a, b); @@ -8104,6 +8746,7 @@ pub unsafe fn _mm_testn_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_testn_epi8_mask&expand=5928) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmb))] pub unsafe fn _mm_mask_testn_epi8_mask(k: __mmask16, a: __m128i, b: __m128i) -> __mmask16 { let and = _mm_and_si128(a, b); @@ -8116,6 +8759,7 @@ pub unsafe fn _mm_mask_testn_epi8_mask(k: __mmask16, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_store_mask64&expand=5578) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(mov))] //should be kmovq pub unsafe fn _store_mask64(mem_addr: *mut u64, a: __mmask64) { ptr::write(mem_addr as *mut __mmask64, a); @@ -8126,6 +8770,7 @@ pub unsafe fn _store_mask64(mem_addr: *mut u64, a: __mmask64) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_store_mask32&expand=5577) #[inline] #[target_feature(enable = "avx512bw")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(mov))] //should be kmovd pub unsafe fn _store_mask32(mem_addr: *mut u32, a: __mmask32) { ptr::write(mem_addr as *mut __mmask32, a); @@ -8136,6 +8781,7 @@ pub unsafe fn _store_mask32(mem_addr: *mut u32, a: __mmask32) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_load_mask64&expand=3318) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(mov))] //should be kmovq pub unsafe fn _load_mask64(mem_addr: *const u64) -> __mmask64 { ptr::read(mem_addr as *const __mmask64) @@ -8146,6 +8792,7 @@ pub unsafe fn _load_mask64(mem_addr: *const u64) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_load_mask32&expand=3317) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(mov))] //should be kmovd pub unsafe fn _load_mask32(mem_addr: *const u32) -> __mmask32 { ptr::read(mem_addr as *const __mmask32) @@ -8156,6 +8803,7 @@ pub unsafe fn _load_mask32(mem_addr: *const u32) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sad_epu8&expand=4855) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsadbw))] pub unsafe fn _mm512_sad_epu8(a: __m512i, b: __m512i) -> __m512i { transmute(vpsadbw(a.as_u8x64(), b.as_u8x64())) @@ -8166,6 +8814,7 @@ pub unsafe fn _mm512_sad_epu8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_dbsad_epu8&expand=2114) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))] pub unsafe fn _mm512_dbsad_epu8(a: __m512i, b: __m512i) -> __m512i { @@ -8181,6 +8830,7 @@ pub unsafe fn _mm512_dbsad_epu8(a: __m512i, b: __m512i) -> __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_dbsad_epu8&expand=2115) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(4)] #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))] pub unsafe fn _mm512_mask_dbsad_epu8( @@ -8201,6 +8851,7 @@ pub unsafe fn _mm512_mask_dbsad_epu8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_dbsad_epu8&expand=2116) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))] pub unsafe fn _mm512_maskz_dbsad_epu8( @@ -8224,6 +8875,7 @@ pub unsafe fn _mm512_maskz_dbsad_epu8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_dbsad_epu8&expand=2111) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))] pub unsafe fn _mm256_dbsad_epu8(a: __m256i, b: __m256i) -> __m256i { @@ -8239,6 +8891,7 @@ pub unsafe fn _mm256_dbsad_epu8(a: __m256i, b: __m256i) -> __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_dbsad_epu8&expand=2112) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(4)] #[cfg_attr(test, 
assert_instr(vdbpsadbw, IMM8 = 0))] pub unsafe fn _mm256_mask_dbsad_epu8( @@ -8259,6 +8912,7 @@ pub unsafe fn _mm256_mask_dbsad_epu8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_dbsad_epu8&expand=2113) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))] pub unsafe fn _mm256_maskz_dbsad_epu8( @@ -8282,6 +8936,7 @@ pub unsafe fn _mm256_maskz_dbsad_epu8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dbsad_epu8&expand=2108) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))] pub unsafe fn _mm_dbsad_epu8(a: __m128i, b: __m128i) -> __m128i { @@ -8297,6 +8952,7 @@ pub unsafe fn _mm_dbsad_epu8(a: __m128i, b: __m128i) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_dbsad_epu8&expand=2109) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(4)] #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))] pub unsafe fn _mm_mask_dbsad_epu8( @@ -8317,6 +8973,7 @@ pub unsafe fn _mm_mask_dbsad_epu8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_dbsad_epu8&expand=2110) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))] pub unsafe fn _mm_maskz_dbsad_epu8( @@ -8336,6 +8993,7 @@ pub unsafe fn _mm_maskz_dbsad_epu8( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_movepi16_mask&expand=3873) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovw2m))] pub unsafe fn _mm512_movepi16_mask(a: __m512i) -> __mmask32 { let filter = _mm512_set1_epi16(1 << 15); @@ -8348,6 +9006,7 @@ pub unsafe fn _mm512_movepi16_mask(a: __m512i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_movepi16_mask&expand=3872) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovw2m))] pub unsafe fn _mm256_movepi16_mask(a: __m256i) -> __mmask16 { let filter = _mm256_set1_epi16(1 << 15); @@ -8360,6 +9019,7 @@ pub unsafe fn _mm256_movepi16_mask(a: __m256i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movepi16_mask&expand=3871) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovw2m))] pub unsafe fn _mm_movepi16_mask(a: __m128i) -> __mmask8 { let filter = _mm_set1_epi16(1 << 15); @@ -8372,6 +9032,7 @@ pub unsafe fn _mm_movepi16_mask(a: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_movepi8_mask&expand=3883) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovb2m))] pub unsafe fn _mm512_movepi8_mask(a: __m512i) -> __mmask64 { let filter = _mm512_set1_epi8(1 << 7); @@ -8384,6 +9045,7 @@ pub unsafe fn _mm512_movepi8_mask(a: __m512i) -> __mmask64 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_movepi8_mask&expand=3882) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovmskb))] // should be vpmovb2m but compiled to vpmovmskb in the test shim because that takes less cycles than // using vpmovb2m plus converting the mask register to a standard register. pub unsafe fn _mm256_movepi8_mask(a: __m256i) -> __mmask32 { @@ -8397,6 +9059,7 @@ pub unsafe fn _mm256_movepi8_mask(a: __m256i) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movepi8_mask&expand=3881) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovmskb))] // should be vpmovb2m but compiled to vpmovmskb in the test shim because that takes less cycles than // using vpmovb2m plus converting the mask register to a standard register. 
pub unsafe fn _mm_movepi8_mask(a: __m128i) -> __mmask16 { @@ -8410,6 +9073,7 @@ pub unsafe fn _mm_movepi8_mask(a: __m128i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_movm_epi16&expand=3886) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovm2w))] pub unsafe fn _mm512_movm_epi16(k: __mmask32) -> __m512i { let one = _mm512_set1_epi16( @@ -8440,6 +9104,7 @@ pub unsafe fn _mm512_movm_epi16(k: __mmask32) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_movm_epi16&expand=3885) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovm2w))] pub unsafe fn _mm256_movm_epi16(k: __mmask16) -> __m256i { let one = _mm256_set1_epi16( @@ -8470,6 +9135,7 @@ pub unsafe fn _mm256_movm_epi16(k: __mmask16) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movm_epi16&expand=3884) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovm2w))] pub unsafe fn _mm_movm_epi16(k: __mmask8) -> __m128i { let one = _mm_set1_epi16( @@ -8500,6 +9166,7 @@ pub unsafe fn _mm_movm_epi16(k: __mmask8) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_movm_epi8&expand=3895) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovm2b))] pub unsafe fn _mm512_movm_epi8(k: __mmask64) -> __m512i { let one = @@ -8514,6 +9181,7 @@ pub unsafe fn _mm512_movm_epi8(k: __mmask64) -> __m512i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_movm_epi8&expand=3894) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovm2b))] pub unsafe fn _mm256_movm_epi8(k: __mmask32) -> __m256i { let one = @@ -8528,6 +9196,7 @@ pub unsafe fn _mm256_movm_epi8(k: __mmask32) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movm_epi8&expand=3893) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovm2b))] pub unsafe fn _mm_movm_epi8(k: __mmask16) -> __m128i { let one = _mm_set1_epi8(1 << 7 | 1 << 6 | 1 << 5 | 1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0) @@ -8541,6 +9210,7 @@ pub unsafe fn _mm_movm_epi8(k: __mmask16) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kadd_mask32&expand=3207) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _kadd_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { a + b } @@ -8550,6 +9220,7 @@ pub unsafe fn _kadd_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kadd_mask64&expand=3208) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _kadd_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { a + b } @@ -8559,6 +9230,7 @@ pub unsafe fn _kadd_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kand_mask32&expand=3213) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub unsafe fn _kand_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { a & b } @@ -8568,6 +9240,7 @@ pub unsafe fn _kand_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kand_mask64&expand=3214) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _kand_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { a & b } @@ -8577,6 +9250,7 @@ pub unsafe fn _kand_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_knot_mask32&expand=3234) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _knot_mask32(a: __mmask32) -> __mmask32 { a ^ 0b11111111_11111111_11111111_11111111 } @@ -8586,6 +9260,7 @@ pub unsafe fn _knot_mask32(a: __mmask32) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_knot_mask64&expand=3235) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _knot_mask64(a: __mmask64) -> __mmask64 { a ^ 0b11111111_11111111_11111111_11111111_11111111_11111111_11111111_11111111 } @@ -8595,6 +9270,7 @@ pub unsafe fn _knot_mask64(a: __mmask64) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kandn_mask32&expand=3219) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _kandn_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { _knot_mask32(a) & b } @@ -8604,6 +9280,7 @@ pub unsafe fn _kandn_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kandn_mask64&expand=3220) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _kandn_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { _knot_mask64(a) & b } @@ -8613,6 +9290,7 @@ pub unsafe fn _kandn_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kor_mask32&expand=3240) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _kor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { a | b } @@ -8622,6 +9300,7 @@ pub unsafe fn _kor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kor_mask64&expand=3241) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _kor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { a | b } @@ -8631,6 +9310,7 @@ pub unsafe fn _kor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kxor_mask32&expand=3292) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _kxor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { a ^ b } @@ -8640,6 +9320,7 @@ pub unsafe fn _kxor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kxor_mask64&expand=3293) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _kxor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { a ^ b } @@ -8649,6 +9330,7 @@ 
pub unsafe fn _kxor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kxnor_mask32&expand=3286) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _kxnor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { _knot_mask32(a ^ b) } @@ -8658,6 +9340,7 @@ pub unsafe fn _kxnor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kxnor_mask64&expand=3287) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _kxnor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { _knot_mask64(a ^ b) } @@ -8667,6 +9350,7 @@ pub unsafe fn _kxnor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi16_epi8&expand=1407) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovwb))] pub unsafe fn _mm512_cvtepi16_epi8(a: __m512i) -> __m256i { let a = a.as_i16x32(); @@ -8678,6 +9362,7 @@ pub unsafe fn _mm512_cvtepi16_epi8(a: __m512i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi16_epi8&expand=1408) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovwb))] pub unsafe fn _mm512_mask_cvtepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) -> __m256i { let convert = _mm512_cvtepi16_epi8(a).as_i8x32(); @@ -8689,6 +9374,7 @@ pub unsafe fn _mm512_mask_cvtepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi16_epi8&expand=1409) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovwb))] pub unsafe fn _mm512_maskz_cvtepi16_epi8(k: __mmask32, a: __m512i) -> __m256i { let convert = _mm512_cvtepi16_epi8(a).as_i8x32(); @@ -8704,6 +9390,7 @@ pub unsafe fn _mm512_maskz_cvtepi16_epi8(k: __mmask32, a: __m512i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi16_epi8&expand=1404) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovwb))] pub unsafe fn _mm256_cvtepi16_epi8(a: __m256i) -> __m128i { let a = a.as_i16x16(); @@ -8715,6 +9402,7 @@ pub unsafe fn _mm256_cvtepi16_epi8(a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi16_epi8&expand=1405) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovwb))] pub unsafe fn _mm256_mask_cvtepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) -> __m128i { let convert = _mm256_cvtepi16_epi8(a).as_i8x16(); @@ -8726,6 +9414,7 @@ pub unsafe fn _mm256_mask_cvtepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi16_epi8&expand=1406) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovwb))] pub unsafe fn _mm256_maskz_cvtepi16_epi8(k: __mmask16, a: __m256i) -> __m128i { let convert = _mm256_cvtepi16_epi8(a).as_i8x16(); @@ -8741,6 +9430,7 @@ 
pub unsafe fn _mm256_maskz_cvtepi16_epi8(k: __mmask16, a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi16_epi8&expand=1401) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovwb))] pub unsafe fn _mm_cvtepi16_epi8(a: __m128i) -> __m128i { let a = a.as_i16x8(); @@ -8754,6 +9444,7 @@ pub unsafe fn _mm_cvtepi16_epi8(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi16_epi8&expand=1402) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovwb))] pub unsafe fn _mm_mask_cvtepi16_epi8(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepi16_epi8(a).as_i8x16(); @@ -8766,6 +9457,7 @@ pub unsafe fn _mm_mask_cvtepi16_epi8(src: __m128i, k: __mmask8, a: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi16_epi8&expand=1403) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovwb))] pub unsafe fn _mm_maskz_cvtepi16_epi8(k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepi16_epi8(a).as_i8x16(); @@ -8779,6 +9471,7 @@ pub unsafe fn _mm_maskz_cvtepi16_epi8(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtsepi16_epi8&expand=1807) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovswb))] pub unsafe fn _mm512_cvtsepi16_epi8(a: __m512i) -> __m256i { transmute(vpmovswb( @@ -8793,6 
+9486,7 @@ pub unsafe fn _mm512_cvtsepi16_epi8(a: __m512i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi16_epi8&expand=1808) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovswb))] pub unsafe fn _mm512_mask_cvtsepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) -> __m256i { transmute(vpmovswb(a.as_i16x32(), src.as_i8x32(), k)) @@ -8803,6 +9497,7 @@ pub unsafe fn _mm512_mask_cvtsepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtsepi16_epi8&expand=1809) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovswb))] pub unsafe fn _mm512_maskz_cvtsepi16_epi8(k: __mmask32, a: __m512i) -> __m256i { transmute(vpmovswb( @@ -8817,6 +9512,7 @@ pub unsafe fn _mm512_maskz_cvtsepi16_epi8(k: __mmask32, a: __m512i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtsepi16_epi8&expand=1804) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovswb))] pub unsafe fn _mm256_cvtsepi16_epi8(a: __m256i) -> __m128i { transmute(vpmovswb256( @@ -8831,6 +9527,7 @@ pub unsafe fn _mm256_cvtsepi16_epi8(a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi16_epi8&expand=1805) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovswb))] pub unsafe fn _mm256_mask_cvtsepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) -> __m128i 
{ transmute(vpmovswb256(a.as_i16x16(), src.as_i8x16(), k)) @@ -8841,6 +9538,7 @@ pub unsafe fn _mm256_mask_cvtsepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtsepi16_epi8&expand=1806) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovswb))] pub unsafe fn _mm256_maskz_cvtsepi16_epi8(k: __mmask16, a: __m256i) -> __m128i { transmute(vpmovswb256( @@ -8855,6 +9553,7 @@ pub unsafe fn _mm256_maskz_cvtsepi16_epi8(k: __mmask16, a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsepi16_epi8&expand=1801) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovswb))] pub unsafe fn _mm_cvtsepi16_epi8(a: __m128i) -> __m128i { transmute(vpmovswb128( @@ -8869,6 +9568,7 @@ pub unsafe fn _mm_cvtsepi16_epi8(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi16_epi8&expand=1802) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovswb))] pub unsafe fn _mm_mask_cvtsepi16_epi8(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovswb128(a.as_i16x8(), src.as_i8x16(), k)) @@ -8879,6 +9579,7 @@ pub unsafe fn _mm_mask_cvtsepi16_epi8(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtsepi16_epi8&expand=1803) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpmovswb))] pub unsafe fn _mm_maskz_cvtsepi16_epi8(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovswb128(a.as_i16x8(), _mm_setzero_si128().as_i8x16(), k)) @@ -8889,6 +9590,7 @@ pub unsafe fn _mm_maskz_cvtsepi16_epi8(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtusepi16_epi8&expand=2042) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovuswb))] pub unsafe fn _mm512_cvtusepi16_epi8(a: __m512i) -> __m256i { transmute(vpmovuswb( @@ -8903,6 +9605,7 @@ pub unsafe fn _mm512_cvtusepi16_epi8(a: __m512i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi16_epi8&expand=2043) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovuswb))] pub unsafe fn _mm512_mask_cvtusepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) -> __m256i { transmute(vpmovuswb(a.as_u16x32(), src.as_u8x32(), k)) @@ -8913,6 +9616,7 @@ pub unsafe fn _mm512_mask_cvtusepi16_epi8(src: __m256i, k: __mmask32, a: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtusepi16_epi8&expand=2044) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovuswb))] pub unsafe fn _mm512_maskz_cvtusepi16_epi8(k: __mmask32, a: __m512i) -> __m256i { transmute(vpmovuswb( @@ -8927,6 +9631,7 @@ pub unsafe fn _mm512_maskz_cvtusepi16_epi8(k: __mmask32, a: __m512i) -> __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtusepi16_epi8&expand=2039) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovuswb))] pub unsafe fn _mm256_cvtusepi16_epi8(a: __m256i) -> __m128i { transmute(vpmovuswb256( @@ -8941,6 +9646,7 @@ pub unsafe fn _mm256_cvtusepi16_epi8(a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi16_epi8&expand=2040) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovuswb))] pub unsafe fn _mm256_mask_cvtusepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) -> __m128i { transmute(vpmovuswb256(a.as_u16x16(), src.as_u8x16(), k)) @@ -8951,6 +9657,7 @@ pub unsafe fn _mm256_mask_cvtusepi16_epi8(src: __m128i, k: __mmask16, a: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtusepi16_epi8&expand=2041) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovuswb))] pub unsafe fn _mm256_maskz_cvtusepi16_epi8(k: __mmask16, a: __m256i) -> __m128i { transmute(vpmovuswb256( @@ -8965,6 +9672,7 @@ pub unsafe fn _mm256_maskz_cvtusepi16_epi8(k: __mmask16, a: __m256i) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtusepi16_epi8&expand=2036) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovuswb))] pub unsafe fn _mm_cvtusepi16_epi8(a: __m128i) -> __m128i { transmute(vpmovuswb128( @@ -8979,6 +9687,7 @@ pub unsafe fn _mm_cvtusepi16_epi8(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi16_epi8&expand=2037) #[inline] 
#[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovuswb))] pub unsafe fn _mm_mask_cvtusepi16_epi8(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovuswb128(a.as_u16x8(), src.as_u8x16(), k)) @@ -8989,6 +9698,7 @@ pub unsafe fn _mm_mask_cvtusepi16_epi8(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtusepi16_epi8&expand=2038) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovuswb))] pub unsafe fn _mm_maskz_cvtusepi16_epi8(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovuswb128( @@ -9003,6 +9713,7 @@ pub unsafe fn _mm_maskz_cvtusepi16_epi8(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi8_epi16&expand=1526) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbw))] pub unsafe fn _mm512_cvtepi8_epi16(a: __m256i) -> __m512i { let a = a.as_i8x32(); @@ -9014,6 +9725,7 @@ pub unsafe fn _mm512_cvtepi8_epi16(a: __m256i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi8_epi16&expand=1527) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbw))] pub unsafe fn _mm512_mask_cvtepi8_epi16(src: __m512i, k: __mmask32, a: __m256i) -> __m512i { let convert = _mm512_cvtepi8_epi16(a).as_i16x32(); @@ -9025,6 +9737,7 @@ pub unsafe fn _mm512_mask_cvtepi8_epi16(src: __m512i, k: __mmask32, a: __m256i) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi8_epi16&expand=1528) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbw))] pub unsafe fn _mm512_maskz_cvtepi8_epi16(k: __mmask32, a: __m256i) -> __m512i { let convert = _mm512_cvtepi8_epi16(a).as_i16x32(); @@ -9040,6 +9753,7 @@ pub unsafe fn _mm512_maskz_cvtepi8_epi16(k: __mmask32, a: __m256i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi8_epi16&expand=1524) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbw))] pub unsafe fn _mm256_mask_cvtepi8_epi16(src: __m256i, k: __mmask16, a: __m128i) -> __m256i { let convert = _mm256_cvtepi8_epi16(a).as_i16x16(); @@ -9051,6 +9765,7 @@ pub unsafe fn _mm256_mask_cvtepi8_epi16(src: __m256i, k: __mmask16, a: __m128i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi8_epi16&expand=1525) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbw))] pub unsafe fn _mm256_maskz_cvtepi8_epi16(k: __mmask16, a: __m128i) -> __m256i { let convert = _mm256_cvtepi8_epi16(a).as_i16x16(); @@ -9066,6 +9781,7 @@ pub unsafe fn _mm256_maskz_cvtepi8_epi16(k: __mmask16, a: __m128i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi8_epi16&expand=1521) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbw))] pub unsafe fn _mm_mask_cvtepi8_epi16(src: __m128i, k: __mmask8, a: 
__m128i) -> __m128i { let convert = _mm_cvtepi8_epi16(a).as_i16x8(); @@ -9077,6 +9793,7 @@ pub unsafe fn _mm_mask_cvtepi8_epi16(src: __m128i, k: __mmask8, a: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi8_epi16&expand=1522) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbw))] pub unsafe fn _mm_maskz_cvtepi8_epi16(k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepi8_epi16(a).as_i16x8(); @@ -9092,6 +9809,7 @@ pub unsafe fn _mm_maskz_cvtepi8_epi16(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu8_epi16&expand=1612) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbw))] pub unsafe fn _mm512_cvtepu8_epi16(a: __m256i) -> __m512i { let a = a.as_u8x32(); @@ -9103,6 +9821,7 @@ pub unsafe fn _mm512_cvtepu8_epi16(a: __m256i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu8_epi16&expand=1613) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbw))] pub unsafe fn _mm512_mask_cvtepu8_epi16(src: __m512i, k: __mmask32, a: __m256i) -> __m512i { let convert = _mm512_cvtepu8_epi16(a).as_i16x32(); @@ -9114,6 +9833,7 @@ pub unsafe fn _mm512_mask_cvtepu8_epi16(src: __m512i, k: __mmask32, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepu8_epi16&expand=1614) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpmovzxbw))] pub unsafe fn _mm512_maskz_cvtepu8_epi16(k: __mmask32, a: __m256i) -> __m512i { let convert = _mm512_cvtepu8_epi16(a).as_i16x32(); @@ -9129,6 +9849,7 @@ pub unsafe fn _mm512_maskz_cvtepu8_epi16(k: __mmask32, a: __m256i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepu8_epi16&expand=1610) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbw))] pub unsafe fn _mm256_mask_cvtepu8_epi16(src: __m256i, k: __mmask16, a: __m128i) -> __m256i { let convert = _mm256_cvtepu8_epi16(a).as_i16x16(); @@ -9140,6 +9861,7 @@ pub unsafe fn _mm256_mask_cvtepu8_epi16(src: __m256i, k: __mmask16, a: __m128i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepu8_epi16&expand=1611) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbw))] pub unsafe fn _mm256_maskz_cvtepu8_epi16(k: __mmask16, a: __m128i) -> __m256i { let convert = _mm256_cvtepu8_epi16(a).as_i16x16(); @@ -9155,6 +9877,7 @@ pub unsafe fn _mm256_maskz_cvtepu8_epi16(k: __mmask16, a: __m128i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepu8_epi16&expand=1607) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbw))] pub unsafe fn _mm_mask_cvtepu8_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepu8_epi16(a).as_i16x8(); @@ -9166,6 +9889,7 @@ pub unsafe fn _mm_mask_cvtepu8_epi16(src: __m128i, k: __mmask8, a: __m128i) -> _ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepu8_epi16&expand=1608) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbw))] pub unsafe fn _mm_maskz_cvtepu8_epi16(k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepu8_epi16(a).as_i16x8(); @@ -9181,6 +9905,7 @@ pub unsafe fn _mm_maskz_cvtepu8_epi16(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_bslli_epi128&expand=591) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpslldq, IMM8 = 3))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_bslli_epi128(a: __m512i) -> __m512i { @@ -9273,6 +9998,7 @@ pub unsafe fn _mm512_bslli_epi128(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_bsrli_epi128&expand=594) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrldq, IMM8 = 3))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_bsrli_epi128(a: __m512i) -> __m512i { @@ -9447,6 +10173,7 @@ pub unsafe fn _mm512_bsrli_epi128(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_alignr_epi8&expand=263) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_alignr_epi8(a: __m512i, b: __m512i) -> __m512i { @@ -9633,6 +10360,7 @@ pub unsafe fn _mm512_alignr_epi8(a: __m512i, b: __m512i) -> __m /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_alignr_epi8&expand=264) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_alignr_epi8( @@ -9651,6 +10379,7 @@ pub unsafe fn _mm512_mask_alignr_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_alignr_epi8&expand=265) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_alignr_epi8( @@ -9669,6 +10398,7 @@ pub unsafe fn _mm512_maskz_alignr_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_alignr_epi8&expand=261) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(4)] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 5))] pub unsafe fn _mm256_mask_alignr_epi8( @@ -9687,6 +10417,7 @@ pub unsafe fn _mm256_mask_alignr_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_alignr_epi8&expand=262) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 5))] pub unsafe fn _mm256_maskz_alignr_epi8( @@ -9708,6 +10439,7 @@ pub unsafe fn _mm256_maskz_alignr_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_alignr_epi8&expand=258) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(4)] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 5))] pub unsafe fn _mm_mask_alignr_epi8( @@ -9726,6 +10458,7 @@ pub unsafe fn _mm_mask_alignr_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_alignr_epi8&expand=259) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 5))] pub unsafe fn _mm_maskz_alignr_epi8( @@ -9744,6 +10477,7 @@ pub unsafe fn _mm_maskz_alignr_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi16_storeu_epi8&expand=1812) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovswb))] pub unsafe fn _mm512_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32, a: __m512i) { vpmovswbmem(mem_addr, a.as_i16x32(), k); @@ -9754,6 +10488,7 @@ pub unsafe fn _mm512_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi16_storeu_epi8&expand=1811) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovswb))] pub unsafe fn _mm256_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16, a: __m256i) { vpmovswbmem256(mem_addr, a.as_i16x16(), k); @@ -9764,6 +10499,7 @@ pub unsafe fn _mm256_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi16_storeu_epi8&expand=1810) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature 
= "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovswb))] pub unsafe fn _mm_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovswbmem128(mem_addr, a.as_i16x8(), k); @@ -9774,6 +10510,7 @@ pub unsafe fn _mm_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi16_storeu_epi8&expand=1412) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovwb))] pub unsafe fn _mm512_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32, a: __m512i) { vpmovwbmem(mem_addr, a.as_i16x32(), k); @@ -9784,6 +10521,7 @@ pub unsafe fn _mm512_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi16_storeu_epi8&expand=1411) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovwb))] pub unsafe fn _mm256_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16, a: __m256i) { vpmovwbmem256(mem_addr, a.as_i16x16(), k); @@ -9794,6 +10532,7 @@ pub unsafe fn _mm256_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi16_storeu_epi8&expand=1410) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovwb))] pub unsafe fn _mm_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovwbmem128(mem_addr, a.as_i16x8(), k); @@ -9804,6 +10543,7 @@ pub unsafe fn _mm_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: _ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi16_storeu_epi8&expand=2047) #[inline] #[target_feature(enable = "avx512bw")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovuswb))] pub unsafe fn _mm512_mask_cvtusepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32, a: __m512i) { vpmovuswbmem(mem_addr, a.as_i16x32(), k); @@ -9814,6 +10554,7 @@ pub unsafe fn _mm512_mask_cvtusepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi16_storeu_epi8&expand=2046) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovuswb))] pub unsafe fn _mm256_mask_cvtusepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16, a: __m256i) { vpmovuswbmem256(mem_addr, a.as_i16x16(), k); @@ -9824,6 +10565,7 @@ pub unsafe fn _mm256_mask_cvtusepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi16_storeu_epi8&expand=2045) #[inline] #[target_feature(enable = "avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovuswb))] pub unsafe fn _mm_mask_cvtusepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovuswbmem128(mem_addr, a.as_i16x8(), k); diff --git a/crates/core_arch/src/x86/avx512cd.rs b/crates/core_arch/src/x86/avx512cd.rs index a54b547635..99d6daeffb 100644 --- a/crates/core_arch/src/x86/avx512cd.rs +++ b/crates/core_arch/src/x86/avx512cd.rs @@ -11,6 +11,7 @@ use stdarch_test::assert_instr; /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastmw_epi32&expand=553) #[inline] #[target_feature(enable = 
"avx512cd")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmw2d pub unsafe fn _mm512_broadcastmw_epi32(k: __mmask16) -> __m512i { _mm512_set1_epi32(k as i32) @@ -21,6 +22,7 @@ pub unsafe fn _mm512_broadcastmw_epi32(k: __mmask16) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcastmw_epi32&expand=552) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmw2d pub unsafe fn _mm256_broadcastmw_epi32(k: __mmask16) -> __m256i { _mm256_set1_epi32(k as i32) @@ -31,6 +33,7 @@ pub unsafe fn _mm256_broadcastmw_epi32(k: __mmask16) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_broadcastmw_epi32&expand=551) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmw2d pub unsafe fn _mm_broadcastmw_epi32(k: __mmask16) -> __m128i { _mm_set1_epi32(k as i32) @@ -41,6 +44,7 @@ pub unsafe fn _mm_broadcastmw_epi32(k: __mmask16) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastmb_epi64&expand=550) #[inline] #[target_feature(enable = "avx512cd")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmb2q pub unsafe fn _mm512_broadcastmb_epi64(k: __mmask8) -> __m512i { _mm512_set1_epi64(k as i64) @@ -51,6 +55,7 @@ pub unsafe fn _mm512_broadcastmb_epi64(k: __mmask8) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcastmb_epi64&expand=549) 
#[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmb2q pub unsafe fn _mm256_broadcastmb_epi64(k: __mmask8) -> __m256i { _mm256_set1_epi64x(k as i64) @@ -61,6 +66,7 @@ pub unsafe fn _mm256_broadcastmb_epi64(k: __mmask8) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_broadcastmb_epi64&expand=548) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmb2q pub unsafe fn _mm_broadcastmb_epi64(k: __mmask8) -> __m128i { _mm_set1_epi64x(k as i64) @@ -71,6 +77,7 @@ pub unsafe fn _mm_broadcastmb_epi64(k: __mmask8) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_conflict_epi32&expand=1248) #[inline] #[target_feature(enable = "avx512cd")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictd))] pub unsafe fn _mm512_conflict_epi32(a: __m512i) -> __m512i { transmute(vpconflictd(a.as_i32x16())) @@ -81,6 +88,7 @@ pub unsafe fn _mm512_conflict_epi32(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_conflict_epi32&expand=1249) #[inline] #[target_feature(enable = "avx512cd")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictd))] pub unsafe fn _mm512_mask_conflict_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { let conflict = _mm512_conflict_epi32(a).as_i32x16(); @@ -92,6 +100,7 @@ pub unsafe fn _mm512_mask_conflict_epi32(src: __m512i, k: __mmask16, a: __m512i) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_conflict_epi32&expand=1250) #[inline] #[target_feature(enable = "avx512cd")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictd))] pub unsafe fn _mm512_maskz_conflict_epi32(k: __mmask16, a: __m512i) -> __m512i { let conflict = _mm512_conflict_epi32(a).as_i32x16(); @@ -104,6 +113,7 @@ pub unsafe fn _mm512_maskz_conflict_epi32(k: __mmask16, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_conflict_epi32&expand=1245) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictd))] pub unsafe fn _mm256_conflict_epi32(a: __m256i) -> __m256i { transmute(vpconflictd256(a.as_i32x8())) @@ -114,6 +124,7 @@ pub unsafe fn _mm256_conflict_epi32(a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_conflict_epi32&expand=1246) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictd))] pub unsafe fn _mm256_mask_conflict_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { let conflict = _mm256_conflict_epi32(a).as_i32x8(); @@ -125,6 +136,7 @@ pub unsafe fn _mm256_mask_conflict_epi32(src: __m256i, k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_conflict_epi32&expand=1247) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictd))] pub unsafe fn _mm256_maskz_conflict_epi32(k: __mmask8, a: __m256i) -> __m256i { let conflict = 
_mm256_conflict_epi32(a).as_i32x8(); @@ -137,6 +149,7 @@ pub unsafe fn _mm256_maskz_conflict_epi32(k: __mmask8, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_conflict_epi32&expand=1242) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictd))] pub unsafe fn _mm_conflict_epi32(a: __m128i) -> __m128i { transmute(vpconflictd128(a.as_i32x4())) @@ -147,6 +160,7 @@ pub unsafe fn _mm_conflict_epi32(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_conflict_epi32&expand=1243) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictd))] pub unsafe fn _mm_mask_conflict_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let conflict = _mm_conflict_epi32(a).as_i32x4(); @@ -158,6 +172,7 @@ pub unsafe fn _mm_mask_conflict_epi32(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_conflict_epi32&expand=1244) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictd))] pub unsafe fn _mm_maskz_conflict_epi32(k: __mmask8, a: __m128i) -> __m128i { let conflict = _mm_conflict_epi32(a).as_i32x4(); @@ -170,6 +185,7 @@ pub unsafe fn _mm_maskz_conflict_epi32(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_conflict_epi64&expand=1257) #[inline] #[target_feature(enable = "avx512cd")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictq))] pub 
unsafe fn _mm512_conflict_epi64(a: __m512i) -> __m512i { transmute(vpconflictq(a.as_i64x8())) @@ -180,6 +196,7 @@ pub unsafe fn _mm512_conflict_epi64(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_conflict_epi64&expand=1258) #[inline] #[target_feature(enable = "avx512cd")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictq))] pub unsafe fn _mm512_mask_conflict_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { let conflict = _mm512_conflict_epi64(a).as_i64x8(); @@ -191,6 +208,7 @@ pub unsafe fn _mm512_mask_conflict_epi64(src: __m512i, k: __mmask8, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_conflict_epi64&expand=1259) #[inline] #[target_feature(enable = "avx512cd")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictq))] pub unsafe fn _mm512_maskz_conflict_epi64(k: __mmask8, a: __m512i) -> __m512i { let conflict = _mm512_conflict_epi64(a).as_i64x8(); @@ -203,6 +221,7 @@ pub unsafe fn _mm512_maskz_conflict_epi64(k: __mmask8, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_conflict_epi64&expand=1254) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictq))] pub unsafe fn _mm256_conflict_epi64(a: __m256i) -> __m256i { transmute(vpconflictq256(a.as_i64x4())) @@ -213,6 +232,7 @@ pub unsafe fn _mm256_conflict_epi64(a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_conflict_epi64&expand=1255) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictq))] pub unsafe fn _mm256_mask_conflict_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { let conflict = _mm256_conflict_epi64(a).as_i64x4(); @@ -224,6 +244,7 @@ pub unsafe fn _mm256_mask_conflict_epi64(src: __m256i, k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_conflict_epi64&expand=1256) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictq))] pub unsafe fn _mm256_maskz_conflict_epi64(k: __mmask8, a: __m256i) -> __m256i { let conflict = _mm256_conflict_epi64(a).as_i64x4(); @@ -236,6 +257,7 @@ pub unsafe fn _mm256_maskz_conflict_epi64(k: __mmask8, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_conflict_epi64&expand=1251) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictq))] pub unsafe fn _mm_conflict_epi64(a: __m128i) -> __m128i { transmute(vpconflictq128(a.as_i64x2())) @@ -246,6 +268,7 @@ pub unsafe fn _mm_conflict_epi64(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_conflict_epi64&expand=1252) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictq))] pub unsafe fn _mm_mask_conflict_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let conflict = _mm_conflict_epi64(a).as_i64x2(); @@ -257,6 +280,7 @@ pub unsafe fn _mm_mask_conflict_epi64(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_conflict_epi64&expand=1253) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpconflictq))] pub unsafe fn _mm_maskz_conflict_epi64(k: __mmask8, a: __m128i) -> __m128i { let conflict = _mm_conflict_epi64(a).as_i64x2(); @@ -269,6 +293,7 @@ pub unsafe fn _mm_maskz_conflict_epi64(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_lzcnt_epi32&expand=3491) #[inline] #[target_feature(enable = "avx512cd")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntd))] pub unsafe fn _mm512_lzcnt_epi32(a: __m512i) -> __m512i { transmute(vplzcntd(a.as_i32x16(), false)) @@ -279,6 +304,7 @@ pub unsafe fn _mm512_lzcnt_epi32(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_lzcnt_epi32&expand=3492) #[inline] #[target_feature(enable = "avx512cd")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntd))] pub unsafe fn _mm512_mask_lzcnt_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { let zerocount = _mm512_lzcnt_epi32(a).as_i32x16(); @@ -290,6 +316,7 @@ pub unsafe fn _mm512_mask_lzcnt_epi32(src: __m512i, k: __mmask16, a: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_lzcnt_epi32&expand=3493) #[inline] #[target_feature(enable = "avx512cd")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntd))] pub unsafe fn _mm512_maskz_lzcnt_epi32(k: __mmask16, a: __m512i) -> __m512i { let zerocount = _mm512_lzcnt_epi32(a).as_i32x16(); @@ -302,6 +329,7 @@ pub unsafe fn 
_mm512_maskz_lzcnt_epi32(k: __mmask16, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_lzcnt_epi32&expand=3488) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntd))] pub unsafe fn _mm256_lzcnt_epi32(a: __m256i) -> __m256i { transmute(vplzcntd256(a.as_i32x8(), false)) @@ -312,6 +340,7 @@ pub unsafe fn _mm256_lzcnt_epi32(a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_lzcnt_epi32&expand=3489) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntd))] pub unsafe fn _mm256_mask_lzcnt_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { let zerocount = _mm256_lzcnt_epi32(a).as_i32x8(); @@ -323,6 +352,7 @@ pub unsafe fn _mm256_mask_lzcnt_epi32(src: __m256i, k: __mmask8, a: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_lzcnt_epi32&expand=3490) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntd))] pub unsafe fn _mm256_maskz_lzcnt_epi32(k: __mmask8, a: __m256i) -> __m256i { let zerocount = _mm256_lzcnt_epi32(a).as_i32x8(); @@ -335,6 +365,7 @@ pub unsafe fn _mm256_maskz_lzcnt_epi32(k: __mmask8, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_lzcnt_epi32&expand=3485) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntd))] pub unsafe fn _mm_lzcnt_epi32(a: __m128i) -> __m128i { 
transmute(vplzcntd128(a.as_i32x4(), false)) @@ -345,6 +376,7 @@ pub unsafe fn _mm_lzcnt_epi32(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_lzcnt_epi32&expand=3486) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntd))] pub unsafe fn _mm_mask_lzcnt_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let zerocount = _mm_lzcnt_epi32(a).as_i32x4(); @@ -356,6 +388,7 @@ pub unsafe fn _mm_mask_lzcnt_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_lzcnt_epi32&expand=3487) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntd))] pub unsafe fn _mm_maskz_lzcnt_epi32(k: __mmask8, a: __m128i) -> __m128i { let zerocount = _mm_lzcnt_epi32(a).as_i32x4(); @@ -368,6 +401,7 @@ pub unsafe fn _mm_maskz_lzcnt_epi32(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_lzcnt_epi64&expand=3500) #[inline] #[target_feature(enable = "avx512cd")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntq))] pub unsafe fn _mm512_lzcnt_epi64(a: __m512i) -> __m512i { transmute(vplzcntq(a.as_i64x8(), false)) @@ -378,6 +412,7 @@ pub unsafe fn _mm512_lzcnt_epi64(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_lzcnt_epi64&expand=3501) #[inline] #[target_feature(enable = "avx512cd")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntq))] pub unsafe fn _mm512_mask_lzcnt_epi64(src: __m512i, k: 
__mmask8, a: __m512i) -> __m512i { let zerocount = _mm512_lzcnt_epi64(a).as_i64x8(); @@ -389,6 +424,7 @@ pub unsafe fn _mm512_mask_lzcnt_epi64(src: __m512i, k: __mmask8, a: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_lzcnt_epi64&expand=3502) #[inline] #[target_feature(enable = "avx512cd")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntq))] pub unsafe fn _mm512_maskz_lzcnt_epi64(k: __mmask8, a: __m512i) -> __m512i { let zerocount = _mm512_lzcnt_epi64(a).as_i64x8(); @@ -401,6 +437,7 @@ pub unsafe fn _mm512_maskz_lzcnt_epi64(k: __mmask8, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_lzcnt_epi64&expand=3497) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntq))] pub unsafe fn _mm256_lzcnt_epi64(a: __m256i) -> __m256i { transmute(vplzcntq256(a.as_i64x4(), false)) @@ -411,6 +448,7 @@ pub unsafe fn _mm256_lzcnt_epi64(a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_lzcnt_epi64&expand=3498) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntq))] pub unsafe fn _mm256_mask_lzcnt_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { let zerocount = _mm256_lzcnt_epi64(a).as_i64x4(); @@ -422,6 +460,7 @@ pub unsafe fn _mm256_mask_lzcnt_epi64(src: __m256i, k: __mmask8, a: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_lzcnt_epi64&expand=3499) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vplzcntq))] pub unsafe fn _mm256_maskz_lzcnt_epi64(k: __mmask8, a: __m256i) -> __m256i { let zerocount = _mm256_lzcnt_epi64(a).as_i64x4(); @@ -434,6 +473,7 @@ pub unsafe fn _mm256_maskz_lzcnt_epi64(k: __mmask8, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_lzcnt_epi64&expand=3494) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntq))] pub unsafe fn _mm_lzcnt_epi64(a: __m128i) -> __m128i { transmute(vplzcntq128(a.as_i64x2(), false)) @@ -444,6 +484,7 @@ pub unsafe fn _mm_lzcnt_epi64(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_lzcnt_epi64&expand=3495) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntq))] pub unsafe fn _mm_mask_lzcnt_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let zerocount = _mm_lzcnt_epi64(a).as_i64x2(); @@ -455,6 +496,7 @@ pub unsafe fn _mm_mask_lzcnt_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_lzcnt_epi64&expand=3496) #[inline] #[target_feature(enable = "avx512cd,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vplzcntq))] pub unsafe fn _mm_maskz_lzcnt_epi64(k: __mmask8, a: __m128i) -> __m128i { let zerocount = _mm_lzcnt_epi64(a).as_i64x2(); diff --git a/crates/core_arch/src/x86/avx512f.rs b/crates/core_arch/src/x86/avx512f.rs index 2801352924..c87bb0bdf1 100644 --- a/crates/core_arch/src/x86/avx512f.rs +++ b/crates/core_arch/src/x86/avx512f.rs @@ -44,6 +44,7 @@ use stdarch_test::assert_instr; /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_abs_epi32&expand=39) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsd))] pub unsafe fn _mm512_abs_epi32(a: __m512i) -> __m512i { let a = a.as_i32x16(); @@ -61,6 +62,7 @@ pub unsafe fn _mm512_abs_epi32(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_abs_epi32&expand=40) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsd))] pub unsafe fn _mm512_mask_abs_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { let abs = _mm512_abs_epi32(a).as_i32x16(); @@ -74,6 +76,7 @@ pub unsafe fn _mm512_mask_abs_epi32(src: __m512i, k: __mmask16, a: __m512i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_abs_epi32&expand=41) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsd))] pub unsafe fn _mm512_maskz_abs_epi32(k: __mmask16, a: __m512i) -> __m512i { let abs = _mm512_abs_epi32(a).as_i32x16(); @@ -86,6 +89,7 @@ pub unsafe fn _mm512_maskz_abs_epi32(k: __mmask16, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_abs_epi32&expand=37) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsd))] pub unsafe fn _mm256_mask_abs_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { let abs = _mm256_abs_epi32(a).as_i32x8(); @@ -97,6 +101,7 @@ pub unsafe fn _mm256_mask_abs_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_abs_epi32&expand=38) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsd))] pub unsafe fn _mm256_maskz_abs_epi32(k: __mmask8, a: __m256i) -> __m256i { let abs = _mm256_abs_epi32(a).as_i32x8(); @@ -109,6 +114,7 @@ pub unsafe fn _mm256_maskz_abs_epi32(k: __mmask8, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_abs_epi32&expand=34) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsd))] pub unsafe fn _mm_mask_abs_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let abs = _mm_abs_epi32(a).as_i32x4(); @@ -120,6 +126,7 @@ pub unsafe fn _mm_mask_abs_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_abs_epi32&expand=35) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsd))] pub unsafe fn _mm_maskz_abs_epi32(k: __mmask8, a: __m128i) -> __m128i { let abs = _mm_abs_epi32(a).as_i32x4(); @@ -132,6 +139,7 @@ pub unsafe fn _mm_maskz_abs_epi32(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_abs_epi64&expand=48) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsq))] pub unsafe fn _mm512_abs_epi64(a: __m512i) -> __m512i { let a = a.as_i64x8(); @@ -147,6 +155,7 @@ pub unsafe fn _mm512_abs_epi64(a: __m512i) -> __m512i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_abs_epi64&expand=49) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsq))] pub unsafe fn _mm512_mask_abs_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { let abs = _mm512_abs_epi64(a).as_i64x8(); @@ -158,6 +167,7 @@ pub unsafe fn _mm512_mask_abs_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_abs_epi64&expand=50) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsq))] pub unsafe fn _mm512_maskz_abs_epi64(k: __mmask8, a: __m512i) -> __m512i { let abs = _mm512_abs_epi64(a).as_i64x8(); @@ -170,6 +180,7 @@ pub unsafe fn _mm512_maskz_abs_epi64(k: __mmask8, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_abs_epi64&expand=45) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsq))] pub unsafe fn _mm256_abs_epi64(a: __m256i) -> __m256i { let a = a.as_i64x4(); @@ -185,6 +196,7 @@ pub unsafe fn _mm256_abs_epi64(a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_abs_epi64&expand=46) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsq))] pub unsafe fn _mm256_mask_abs_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { let abs = _mm256_abs_epi64(a).as_i64x4(); @@ -196,6 +208,7 @@ pub unsafe fn _mm256_mask_abs_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __ /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_abs_epi64&expand=45) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpabsq))] pub unsafe fn _mm256_maskz_abs_epi64(k: __mmask8, a: __m256i) -> __m256i { let abs = _mm256_abs_epi64(a).as_i64x4(); @@ -208,6 +221,7 @@ pub unsafe fn _mm256_maskz_abs_epi64(k: __mmask8, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_abs_ps&expand=65) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandq))] pub unsafe fn _mm512_abs_ps(v2: __m512) -> __m512 { let a = _mm512_set1_epi32(0x7FFFFFFF); // from LLVM code @@ -221,6 +235,7 @@ pub unsafe fn _mm512_abs_ps(v2: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_abs_ps&expand=66) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandd))] pub unsafe fn _mm512_mask_abs_ps(src: __m512, k: __mmask16, v2: __m512) -> __m512 { let abs = _mm512_abs_ps(v2).as_f32x16(); @@ -232,6 +247,7 @@ pub unsafe fn _mm512_mask_abs_ps(src: __m512, k: __mmask16, v2: __m512) -> __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_abs_pd&expand=60) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandq))] pub unsafe fn _mm512_abs_pd(v2: __m512d) -> __m512d { let a = _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF); // from LLVM code @@ -245,6 +261,7 @@ pub unsafe fn _mm512_abs_pd(v2: __m512d) -> __m512d { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_abs_pd&expand=61) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandq))] pub unsafe fn _mm512_mask_abs_pd(src: __m512d, k: __mmask8, v2: __m512d) -> __m512d { let abs = _mm512_abs_pd(v2).as_f64x8(); @@ -256,6 +273,7 @@ pub unsafe fn _mm512_mask_abs_pd(src: __m512d, k: __mmask8, v2: __m512d) -> __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mov_epi32&expand=3801) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa32))] pub unsafe fn _mm512_mask_mov_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { let mov = a.as_i32x16(); @@ -267,6 +285,7 @@ pub unsafe fn _mm512_mask_mov_epi32(src: __m512i, k: __mmask16, a: __m512i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mov_epi32&expand=3802) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa32))] pub unsafe fn _mm512_maskz_mov_epi32(k: __mmask16, a: __m512i) -> __m512i { let mov = a.as_i32x16(); @@ -279,6 +298,7 @@ pub unsafe fn _mm512_maskz_mov_epi32(k: __mmask16, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mov_epi32&expand=3799) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa32))] pub unsafe fn _mm256_mask_mov_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { let mov = a.as_i32x8(); @@ -290,6 +310,7 @@ pub unsafe fn _mm256_mask_mov_epi32(src: __m256i, k: 
__mmask8, a: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mov_epi32&expand=3800) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa32))] pub unsafe fn _mm256_maskz_mov_epi32(k: __mmask8, a: __m256i) -> __m256i { let mov = a.as_i32x8(); @@ -302,6 +323,7 @@ pub unsafe fn _mm256_maskz_mov_epi32(k: __mmask8, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mov_epi32&expand=3797) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa32))] pub unsafe fn _mm_mask_mov_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let mov = a.as_i32x4(); @@ -313,6 +335,7 @@ pub unsafe fn _mm_mask_mov_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mov_epi32&expand=3798) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa32))] pub unsafe fn _mm_maskz_mov_epi32(k: __mmask8, a: __m128i) -> __m128i { let mov = a.as_i32x4(); @@ -325,6 +348,7 @@ pub unsafe fn _mm_maskz_mov_epi32(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mov_epi64&expand=3807) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa64))] pub unsafe fn _mm512_mask_mov_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { let mov = a.as_i64x8(); @@ -336,6 +360,7 @@ pub unsafe fn _mm512_mask_mov_epi64(src: 
__m512i, k: __mmask8, a: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mov_epi64&expand=3808) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa64))] pub unsafe fn _mm512_maskz_mov_epi64(k: __mmask8, a: __m512i) -> __m512i { let mov = a.as_i64x8(); @@ -348,6 +373,7 @@ pub unsafe fn _mm512_maskz_mov_epi64(k: __mmask8, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mov_epi64&expand=3805) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa64))] pub unsafe fn _mm256_mask_mov_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { let mov = a.as_i64x4(); @@ -359,6 +385,7 @@ pub unsafe fn _mm256_mask_mov_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mov_epi64&expand=3806) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa64))] pub unsafe fn _mm256_maskz_mov_epi64(k: __mmask8, a: __m256i) -> __m256i { let mov = a.as_i64x4(); @@ -371,6 +398,7 @@ pub unsafe fn _mm256_maskz_mov_epi64(k: __mmask8, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mov_epi64&expand=3803) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa64))] pub unsafe fn _mm_mask_mov_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let mov = a.as_i64x2(); @@ -382,6 +410,7 @@ pub unsafe fn 
_mm_mask_mov_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mov_epi64&expand=3804) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa64))] pub unsafe fn _mm_maskz_mov_epi64(k: __mmask8, a: __m128i) -> __m128i { let mov = a.as_i64x2(); @@ -394,6 +423,7 @@ pub unsafe fn _mm_maskz_mov_epi64(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mov_ps&expand=3825) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] pub unsafe fn _mm512_mask_mov_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { let mov = a.as_f32x16(); @@ -405,6 +435,7 @@ pub unsafe fn _mm512_mask_mov_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mov_ps&expand=3826) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] pub unsafe fn _mm512_maskz_mov_ps(k: __mmask16, a: __m512) -> __m512 { let mov = a.as_f32x16(); @@ -417,6 +448,7 @@ pub unsafe fn _mm512_maskz_mov_ps(k: __mmask16, a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mov_ps&expand=3823) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] pub unsafe fn _mm256_mask_mov_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { let mov = a.as_f32x8(); @@ -428,6 +460,7 @@ pub unsafe fn _mm256_mask_mov_ps(src: 
__m256, k: __mmask8, a: __m256) -> __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mov_ps&expand=3824) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] pub unsafe fn _mm256_maskz_mov_ps(k: __mmask8, a: __m256) -> __m256 { let mov = a.as_f32x8(); @@ -440,6 +473,7 @@ pub unsafe fn _mm256_maskz_mov_ps(k: __mmask8, a: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mov_ps&expand=3821) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] pub unsafe fn _mm_mask_mov_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { let mov = a.as_f32x4(); @@ -451,6 +485,7 @@ pub unsafe fn _mm_mask_mov_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mov_ps&expand=3822) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] pub unsafe fn _mm_maskz_mov_ps(k: __mmask8, a: __m128) -> __m128 { let mov = a.as_f32x4(); @@ -463,6 +498,7 @@ pub unsafe fn _mm_maskz_mov_ps(k: __mmask8, a: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mov_pd&expand=3819) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovapd))] pub unsafe fn _mm512_mask_mov_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { let mov = a.as_f64x8(); @@ -474,6 +510,7 @@ pub unsafe fn _mm512_mask_mov_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m51 
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mov_pd&expand=3820) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovapd))] pub unsafe fn _mm512_maskz_mov_pd(k: __mmask8, a: __m512d) -> __m512d { let mov = a.as_f64x8(); @@ -486,6 +523,7 @@ pub unsafe fn _mm512_maskz_mov_pd(k: __mmask8, a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mov_pd&expand=3817) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovapd))] pub unsafe fn _mm256_mask_mov_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { let mov = a.as_f64x4(); @@ -497,6 +535,7 @@ pub unsafe fn _mm256_mask_mov_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mov_pd&expand=3818) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovapd))] pub unsafe fn _mm256_maskz_mov_pd(k: __mmask8, a: __m256d) -> __m256d { let mov = a.as_f64x4(); @@ -509,6 +548,7 @@ pub unsafe fn _mm256_maskz_mov_pd(k: __mmask8, a: __m256d) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mov_pd&expand=3815) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovapd))] pub unsafe fn _mm_mask_mov_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d { let mov = a.as_f64x2(); @@ -520,6 +560,7 @@ pub unsafe fn _mm_mask_mov_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mov_pd&expand=3816) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovapd))] pub unsafe fn _mm_maskz_mov_pd(k: __mmask8, a: __m128d) -> __m128d { let mov = a.as_f64x2(); @@ -532,6 +573,7 @@ pub unsafe fn _mm_maskz_mov_pd(k: __mmask8, a: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_epi32&expand=100) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddd))] pub unsafe fn _mm512_add_epi32(a: __m512i, b: __m512i) -> __m512i { transmute(simd_add(a.as_i32x16(), b.as_i32x16())) @@ -542,6 +584,7 @@ pub unsafe fn _mm512_add_epi32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_epi32&expand=101) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddd))] pub unsafe fn _mm512_mask_add_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let add = _mm512_add_epi32(a, b).as_i32x16(); @@ -553,6 +596,7 @@ pub unsafe fn _mm512_mask_add_epi32(src: __m512i, k: __mmask16, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_epi32&expand=102) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddd))] pub unsafe fn _mm512_maskz_add_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let add = _mm512_add_epi32(a, b).as_i32x16(); @@ -565,6 +609,7 @@ pub unsafe fn _mm512_maskz_add_epi32(k: __mmask16, a: 
__m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_add_epi32&expand=98) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddd))] pub unsafe fn _mm256_mask_add_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let add = _mm256_add_epi32(a, b).as_i32x8(); @@ -576,6 +621,7 @@ pub unsafe fn _mm256_mask_add_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_add_epi32&expand=99) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddd))] pub unsafe fn _mm256_maskz_add_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let add = _mm256_add_epi32(a, b).as_i32x8(); @@ -588,6 +634,7 @@ pub unsafe fn _mm256_maskz_add_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_add_epi32&expand=95) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddd))] pub unsafe fn _mm_mask_add_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let add = _mm_add_epi32(a, b).as_i32x4(); @@ -599,6 +646,7 @@ pub unsafe fn _mm_mask_add_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_epi32&expand=96) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddd))] pub unsafe fn _mm_maskz_add_epi32(k: __mmask8, a: __m128i, b: 
__m128i) -> __m128i { let add = _mm_add_epi32(a, b).as_i32x4(); @@ -611,6 +659,7 @@ pub unsafe fn _mm_maskz_add_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_epi64&expand=109) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddq))] pub unsafe fn _mm512_add_epi64(a: __m512i, b: __m512i) -> __m512i { transmute(simd_add(a.as_i64x8(), b.as_i64x8())) @@ -621,6 +670,7 @@ pub unsafe fn _mm512_add_epi64(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_epi64&expand=110) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddq))] pub unsafe fn _mm512_mask_add_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let add = _mm512_add_epi64(a, b).as_i64x8(); @@ -632,6 +682,7 @@ pub unsafe fn _mm512_mask_add_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_epi64&expand=111) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddq))] pub unsafe fn _mm512_maskz_add_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let add = _mm512_add_epi64(a, b).as_i64x8(); @@ -644,6 +695,7 @@ pub unsafe fn _mm512_maskz_add_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_add_epi64&expand=107) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpaddq))] pub unsafe fn _mm256_mask_add_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let add = _mm256_add_epi64(a, b).as_i64x4(); @@ -655,6 +707,7 @@ pub unsafe fn _mm256_mask_add_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_add_epi64&expand=108) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddq))] pub unsafe fn _mm256_maskz_add_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let add = _mm256_add_epi64(a, b).as_i64x4(); @@ -667,6 +720,7 @@ pub unsafe fn _mm256_maskz_add_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_add_epi64&expand=104) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddq))] pub unsafe fn _mm_mask_add_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let add = _mm_add_epi64(a, b).as_i64x2(); @@ -678,6 +732,7 @@ pub unsafe fn _mm_mask_add_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_epi64&expand=105) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpaddq))] pub unsafe fn _mm_maskz_add_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let add = _mm_add_epi64(a, b).as_i64x2(); @@ -690,6 +745,7 @@ pub unsafe fn _mm_maskz_add_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_ps&expand=139) 
#[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddps))] pub unsafe fn _mm512_add_ps(a: __m512, b: __m512) -> __m512 { transmute(simd_add(a.as_f32x16(), b.as_f32x16())) @@ -700,6 +756,7 @@ pub unsafe fn _mm512_add_ps(a: __m512, b: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_ps&expand=140) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddps))] pub unsafe fn _mm512_mask_add_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { let add = _mm512_add_ps(a, b).as_f32x16(); @@ -711,6 +768,7 @@ pub unsafe fn _mm512_mask_add_ps(src: __m512, k: __mmask16, a: __m512, b: __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_ps&expand=141) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddps))] pub unsafe fn _mm512_maskz_add_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { let add = _mm512_add_ps(a, b).as_f32x16(); @@ -723,6 +781,7 @@ pub unsafe fn _mm512_maskz_add_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_add_ps&expand=137) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddps))] pub unsafe fn _mm256_mask_add_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { let add = _mm256_add_ps(a, b).as_f32x8(); @@ -734,6 +793,7 @@ pub unsafe fn _mm256_mask_add_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_add_ps&expand=138) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddps))] pub unsafe fn _mm256_maskz_add_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { let add = _mm256_add_ps(a, b).as_f32x8(); @@ -746,6 +806,7 @@ pub unsafe fn _mm256_maskz_add_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_add_ps&expand=134) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddps))] pub unsafe fn _mm_mask_add_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { let add = _mm_add_ps(a, b).as_f32x4(); @@ -757,6 +818,7 @@ pub unsafe fn _mm_mask_add_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_ps&expand=135) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddps))] pub unsafe fn _mm_maskz_add_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { let add = _mm_add_ps(a, b).as_f32x4(); @@ -769,6 +831,7 @@ pub unsafe fn _mm_maskz_add_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_pd&expand=127) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddpd))] pub unsafe fn _mm512_add_pd(a: __m512d, b: __m512d) -> __m512d { transmute(simd_add(a.as_f64x8(), b.as_f64x8())) @@ -779,6 +842,7 @@ pub unsafe fn _mm512_add_pd(a: __m512d, 
b: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_pd&expand=128) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddpd))] pub unsafe fn _mm512_mask_add_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { let add = _mm512_add_pd(a, b).as_f64x8(); @@ -790,6 +854,7 @@ pub unsafe fn _mm512_mask_add_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_pd&expand=129) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddpd))] pub unsafe fn _mm512_maskz_add_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { let add = _mm512_add_pd(a, b).as_f64x8(); @@ -802,6 +867,7 @@ pub unsafe fn _mm512_maskz_add_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_add_pd&expand=125) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddpd))] pub unsafe fn _mm256_mask_add_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { let add = _mm256_add_pd(a, b).as_f64x4(); @@ -813,6 +879,7 @@ pub unsafe fn _mm256_mask_add_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_add_pd&expand=126) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddpd))] pub unsafe fn _mm256_maskz_add_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { let add = 
_mm256_add_pd(a, b).as_f64x4(); @@ -825,6 +892,7 @@ pub unsafe fn _mm256_maskz_add_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_add_pd&expand=122) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddpd))] pub unsafe fn _mm_mask_add_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let add = _mm_add_pd(a, b).as_f64x2(); @@ -836,6 +904,7 @@ pub unsafe fn _mm_mask_add_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_pd&expand=123) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddpd))] pub unsafe fn _mm_maskz_add_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let add = _mm_add_pd(a, b).as_f64x2(); @@ -848,6 +917,7 @@ pub unsafe fn _mm_maskz_add_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_epi32&expand=5694) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubd))] pub unsafe fn _mm512_sub_epi32(a: __m512i, b: __m512i) -> __m512i { transmute(simd_sub(a.as_i32x16(), b.as_i32x16())) @@ -858,6 +928,7 @@ pub unsafe fn _mm512_sub_epi32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_epi32&expand=5692) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubd))] pub unsafe fn 
_mm512_mask_sub_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let sub = _mm512_sub_epi32(a, b).as_i32x16(); @@ -869,6 +940,7 @@ pub unsafe fn _mm512_mask_sub_epi32(src: __m512i, k: __mmask16, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_epi32&expand=5693) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubd))] pub unsafe fn _mm512_maskz_sub_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let sub = _mm512_sub_epi32(a, b).as_i32x16(); @@ -881,6 +953,7 @@ pub unsafe fn _mm512_maskz_sub_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sub_epi32&expand=5689) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubd))] pub unsafe fn _mm256_mask_sub_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let sub = _mm256_sub_epi32(a, b).as_i32x8(); @@ -892,6 +965,7 @@ pub unsafe fn _mm256_mask_sub_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sub_epi32&expand=5690) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubd))] pub unsafe fn _mm256_maskz_sub_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let sub = _mm256_sub_epi32(a, b).as_i32x8(); @@ -904,6 +978,7 @@ pub unsafe fn _mm256_maskz_sub_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sub_epi32&expand=5686) #[inline] 
#[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubd))] pub unsafe fn _mm_mask_sub_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let sub = _mm_sub_epi32(a, b).as_i32x4(); @@ -915,6 +990,7 @@ pub unsafe fn _mm_mask_sub_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sub_epi32&expand=5687) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubd))] pub unsafe fn _mm_maskz_sub_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let sub = _mm_sub_epi32(a, b).as_i32x4(); @@ -927,6 +1003,7 @@ pub unsafe fn _mm_maskz_sub_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_epi64&expand=5703) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubq))] pub unsafe fn _mm512_sub_epi64(a: __m512i, b: __m512i) -> __m512i { transmute(simd_sub(a.as_i64x8(), b.as_i64x8())) @@ -937,6 +1014,7 @@ pub unsafe fn _mm512_sub_epi64(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_epi64&expand=5701) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubq))] pub unsafe fn _mm512_mask_sub_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let sub = _mm512_sub_epi64(a, b).as_i64x8(); @@ -948,6 +1026,7 @@ pub unsafe fn _mm512_mask_sub_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_epi64&expand=5702) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubq))] pub unsafe fn _mm512_maskz_sub_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let sub = _mm512_sub_epi64(a, b).as_i64x8(); @@ -960,6 +1039,7 @@ pub unsafe fn _mm512_maskz_sub_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sub_epi64&expand=5698) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubq))] pub unsafe fn _mm256_mask_sub_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let sub = _mm256_sub_epi64(a, b).as_i64x4(); @@ -971,6 +1051,7 @@ pub unsafe fn _mm256_mask_sub_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sub_epi64&expand=5699) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubq))] pub unsafe fn _mm256_maskz_sub_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let sub = _mm256_sub_epi64(a, b).as_i64x4(); @@ -983,6 +1064,7 @@ pub unsafe fn _mm256_maskz_sub_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sub_epi64&expand=5695) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubq))] pub unsafe fn _mm_mask_sub_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let 
sub = _mm_sub_epi64(a, b).as_i64x2(); @@ -994,6 +1076,7 @@ pub unsafe fn _mm_mask_sub_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sub_epi64&expand=5696) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsubq))] pub unsafe fn _mm_maskz_sub_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let sub = _mm_sub_epi64(a, b).as_i64x2(); @@ -1006,6 +1089,7 @@ pub unsafe fn _mm_maskz_sub_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_ps&expand=5733) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubps))] pub unsafe fn _mm512_sub_ps(a: __m512, b: __m512) -> __m512 { transmute(simd_sub(a.as_f32x16(), b.as_f32x16())) @@ -1016,6 +1100,7 @@ pub unsafe fn _mm512_sub_ps(a: __m512, b: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_ps&expand=5731) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubps))] pub unsafe fn _mm512_mask_sub_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { let sub = _mm512_sub_ps(a, b).as_f32x16(); @@ -1027,6 +1112,7 @@ pub unsafe fn _mm512_mask_sub_ps(src: __m512, k: __mmask16, a: __m512, b: __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_ps&expand=5732) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubps))] pub unsafe fn 
_mm512_maskz_sub_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { let sub = _mm512_sub_ps(a, b).as_f32x16(); @@ -1039,6 +1125,7 @@ pub unsafe fn _mm512_maskz_sub_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sub_ps&expand=5728) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubps))] pub unsafe fn _mm256_mask_sub_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { let sub = _mm256_sub_ps(a, b).as_f32x8(); @@ -1050,6 +1137,7 @@ pub unsafe fn _mm256_mask_sub_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sub_ps&expand=5729) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubps))] pub unsafe fn _mm256_maskz_sub_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { let sub = _mm256_sub_ps(a, b).as_f32x8(); @@ -1062,6 +1150,7 @@ pub unsafe fn _mm256_maskz_sub_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sub_ps&expand=5725) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubps))] pub unsafe fn _mm_mask_sub_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { let sub = _mm_sub_ps(a, b).as_f32x4(); @@ -1073,6 +1162,7 @@ pub unsafe fn _mm_mask_sub_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sub_ps&expand=5726) #[inline] #[target_feature(enable = "avx512f,avx512vl")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubps))] pub unsafe fn _mm_maskz_sub_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { let sub = _mm_sub_ps(a, b).as_f32x4(); @@ -1085,6 +1175,7 @@ pub unsafe fn _mm_maskz_sub_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_pd&expand=5721) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubpd))] pub unsafe fn _mm512_sub_pd(a: __m512d, b: __m512d) -> __m512d { transmute(simd_sub(a.as_f64x8(), b.as_f64x8())) @@ -1095,6 +1186,7 @@ pub unsafe fn _mm512_sub_pd(a: __m512d, b: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_pd&expand=5719) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubpd))] pub unsafe fn _mm512_mask_sub_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { let sub = _mm512_sub_pd(a, b).as_f64x8(); @@ -1106,6 +1198,7 @@ pub unsafe fn _mm512_mask_sub_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_pd&expand=5720) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubpd))] pub unsafe fn _mm512_maskz_sub_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { let sub = _mm512_sub_pd(a, b).as_f64x8(); @@ -1118,6 +1211,7 @@ pub unsafe fn _mm512_maskz_sub_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sub_pd&expand=5716) 
#[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubpd))] pub unsafe fn _mm256_mask_sub_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { let sub = _mm256_sub_pd(a, b).as_f64x4(); @@ -1129,6 +1223,7 @@ pub unsafe fn _mm256_mask_sub_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sub_pd&expand=5717) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubpd))] pub unsafe fn _mm256_maskz_sub_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { let sub = _mm256_sub_pd(a, b).as_f64x4(); @@ -1141,6 +1236,7 @@ pub unsafe fn _mm256_maskz_sub_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sub_pd&expand=5713) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubpd))] pub unsafe fn _mm_mask_sub_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let sub = _mm_sub_pd(a, b).as_f64x2(); @@ -1152,6 +1248,7 @@ pub unsafe fn _mm_mask_sub_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sub_pd&expand=5714) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubpd))] pub unsafe fn _mm_maskz_sub_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let sub = _mm_sub_pd(a, b).as_f64x2(); @@ -1164,6 +1261,7 @@ pub unsafe fn _mm_maskz_sub_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mul_epi32&expand=3907) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmuldq))] pub unsafe fn _mm512_mul_epi32(a: __m512i, b: __m512i) -> __m512i { transmute(vpmuldq(a.as_i32x16(), b.as_i32x16())) @@ -1174,6 +1272,7 @@ pub unsafe fn _mm512_mul_epi32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mul_epi32&expand=3905) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmuldq))] pub unsafe fn _mm512_mask_mul_epi32(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let mul = _mm512_mul_epi32(a, b).as_i64x8(); @@ -1185,6 +1284,7 @@ pub unsafe fn _mm512_mask_mul_epi32(src: __m512i, k: __mmask8, a: __m512i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mul_epi32&expand=3906) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmuldq))] pub unsafe fn _mm512_maskz_mul_epi32(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let mul = _mm512_mul_epi32(a, b).as_i64x8(); @@ -1197,6 +1297,7 @@ pub unsafe fn _mm512_maskz_mul_epi32(k: __mmask8, a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mul_epi32&expand=3902) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmuldq))] pub unsafe fn _mm256_mask_mul_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let mul = _mm256_mul_epi32(a, b).as_i64x4(); 
@@ -1208,6 +1309,7 @@ pub unsafe fn _mm256_mask_mul_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mul_epi32&expand=3903) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmuldq))] pub unsafe fn _mm256_maskz_mul_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let mul = _mm256_mul_epi32(a, b).as_i64x4(); @@ -1220,6 +1322,7 @@ pub unsafe fn _mm256_maskz_mul_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mul_epi32&expand=3899) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmuldq))] pub unsafe fn _mm_mask_mul_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let mul = _mm_mul_epi32(a, b).as_i64x2(); @@ -1231,6 +1334,7 @@ pub unsafe fn _mm_mask_mul_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mul_epi32&expand=3900) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmuldq))] pub unsafe fn _mm_maskz_mul_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let mul = _mm_mul_epi32(a, b).as_i64x2(); @@ -1243,6 +1347,7 @@ pub unsafe fn _mm_maskz_mul_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mullo_epi32&expand=4005) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulld))]
pub unsafe fn _mm512_mullo_epi32(a: __m512i, b: __m512i) -> __m512i { transmute(simd_mul(a.as_i32x16(), b.as_i32x16())) @@ -1253,6 +1358,7 @@ pub unsafe fn _mm512_mullo_epi32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mullo_epi32&expand=4003) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulld))] pub unsafe fn _mm512_mask_mullo_epi32( src: __m512i, @@ -1269,6 +1375,7 @@ pub unsafe fn _mm512_mask_mullo_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mullo_epi32&expand=4004) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulld))] pub unsafe fn _mm512_maskz_mullo_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let mul = _mm512_mullo_epi32(a, b).as_i32x16(); @@ -1281,6 +1388,7 @@ pub unsafe fn _mm512_maskz_mullo_epi32(k: __mmask16, a: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mullo_epi32&expand=4000) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulld))] pub unsafe fn _mm256_mask_mullo_epi32( src: __m256i, @@ -1297,6 +1405,7 @@ pub unsafe fn _mm256_mask_mullo_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mullo_epi32&expand=4001) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulld))] pub unsafe fn _mm256_maskz_mullo_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let mul = 
_mm256_mullo_epi32(a, b).as_i32x8(); @@ -1309,6 +1418,7 @@ pub unsafe fn _mm256_maskz_mullo_epi32(k: __mmask8, a: __m256i, b: __m256i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mullo_epi32&expand=3997) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulld))] pub unsafe fn _mm_mask_mullo_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let mul = _mm_mullo_epi32(a, b).as_i32x4(); @@ -1320,6 +1430,7 @@ pub unsafe fn _mm_mask_mullo_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mullo_epi32&expand=3998) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmulld))] pub unsafe fn _mm_maskz_mullo_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let mul = _mm_mullo_epi32(a, b).as_i32x4(); @@ -1334,6 +1445,7 @@ pub unsafe fn _mm_maskz_mullo_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m1 /// This intrinsic generates a sequence of instructions, which may perform worse than a native instruction. Consider the performance impact of this intrinsic. #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mullox_epi64(a: __m512i, b: __m512i) -> __m512i { transmute(simd_mul(a.as_i64x8(), b.as_i64x8())) } @@ -1345,6 +1457,7 @@ pub unsafe fn _mm512_mullox_epi64(a: __m512i, b: __m512i) -> __m512i { /// This intrinsic generates a sequence of instructions, which may perform worse than a native instruction. Consider the performance impact of this intrinsic. 
#[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_mullox_epi64( src: __m512i, k: __mmask8, @@ -1360,6 +1473,7 @@ pub unsafe fn _mm512_mask_mullox_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mul_epu32&expand=3916) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmuludq))] pub unsafe fn _mm512_mul_epu32(a: __m512i, b: __m512i) -> __m512i { transmute(vpmuludq(a.as_u32x16(), b.as_u32x16())) @@ -1370,6 +1484,7 @@ pub unsafe fn _mm512_mul_epu32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mul_epu32&expand=3914) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmuludq))] pub unsafe fn _mm512_mask_mul_epu32(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let mul = _mm512_mul_epu32(a, b).as_u64x8(); @@ -1381,6 +1496,7 @@ pub unsafe fn _mm512_mask_mul_epu32(src: __m512i, k: __mmask8, a: __m512i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mul_epu32&expand=3915) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmuludq))] pub unsafe fn _mm512_maskz_mul_epu32(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let mul = _mm512_mul_epu32(a, b).as_u64x8(); @@ -1393,6 +1509,7 @@ pub unsafe fn _mm512_maskz_mul_epu32(k: __mmask8, a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mul_epu32&expand=3911) #[inline] #[target_feature(enable = "avx512f,avx512vl")]
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmuludq))] pub unsafe fn _mm256_mask_mul_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let mul = _mm256_mul_epu32(a, b).as_u64x4(); @@ -1404,6 +1521,7 @@ pub unsafe fn _mm256_mask_mul_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mul_epu32&expand=3912) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmuludq))] pub unsafe fn _mm256_maskz_mul_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let mul = _mm256_mul_epu32(a, b).as_u64x4(); @@ -1416,6 +1534,7 @@ pub unsafe fn _mm256_maskz_mul_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mul_epu32&expand=3908) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmuludq))] pub unsafe fn _mm_mask_mul_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let mul = _mm_mul_epu32(a, b).as_u64x2(); @@ -1427,6 +1546,7 @@ pub unsafe fn _mm_mask_mul_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mul_epu32&expand=3909) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmuludq))] pub unsafe fn _mm_maskz_mul_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let mul = _mm_mul_epu32(a, b).as_u64x2(); @@ -1439,6 +1559,7 @@ pub unsafe fn _mm_maskz_mul_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mul_ps&expand=3934) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulps))] pub unsafe fn _mm512_mul_ps(a: __m512, b: __m512) -> __m512 { transmute(simd_mul(a.as_f32x16(), b.as_f32x16())) @@ -1449,6 +1570,7 @@ pub unsafe fn _mm512_mul_ps(a: __m512, b: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mul_ps&expand=3932) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulps))] pub unsafe fn _mm512_mask_mul_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { let mul = _mm512_mul_ps(a, b).as_f32x16(); @@ -1460,6 +1582,7 @@ pub unsafe fn _mm512_mask_mul_ps(src: __m512, k: __mmask16, a: __m512, b: __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mul_ps&expand=3933) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulps))] pub unsafe fn _mm512_maskz_mul_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { let mul = _mm512_mul_ps(a, b).as_f32x16(); @@ -1472,6 +1595,7 @@ pub unsafe fn _mm512_maskz_mul_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mul_ps&expand=3929) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulps))] pub unsafe fn _mm256_mask_mul_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { let mul = _mm256_mul_ps(a, b).as_f32x8(); @@ -1483,6 +1607,7 @@ pub unsafe fn 
_mm256_mask_mul_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mul_ps&expand=3930) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulps))] pub unsafe fn _mm256_maskz_mul_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { let mul = _mm256_mul_ps(a, b).as_f32x8(); @@ -1495,6 +1620,7 @@ pub unsafe fn _mm256_maskz_mul_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mul_ps&expand=3926) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulps))] pub unsafe fn _mm_mask_mul_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { let mul = _mm_mul_ps(a, b).as_f32x4(); @@ -1506,6 +1632,7 @@ pub unsafe fn _mm_mask_mul_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mul_ps&expand=3927) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulps))] pub unsafe fn _mm_maskz_mul_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { let mul = _mm_mul_ps(a, b).as_f32x4(); @@ -1518,6 +1645,7 @@ pub unsafe fn _mm_maskz_mul_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mul_pd&expand=3925) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulpd))] pub unsafe fn _mm512_mul_pd(a: __m512d, b: __m512d) -> __m512d { 
transmute(simd_mul(a.as_f64x8(), b.as_f64x8())) @@ -1528,6 +1656,7 @@ pub unsafe fn _mm512_mul_pd(a: __m512d, b: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mul_pd&expand=3923) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulpd))] pub unsafe fn _mm512_mask_mul_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { let mul = _mm512_mul_pd(a, b).as_f64x8(); @@ -1539,6 +1668,7 @@ pub unsafe fn _mm512_mask_mul_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mul_pd&expand=3924) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulpd))] pub unsafe fn _mm512_maskz_mul_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { let mul = _mm512_mul_pd(a, b).as_f64x8(); @@ -1551,6 +1681,7 @@ pub unsafe fn _mm512_maskz_mul_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mul_pd&expand=3920) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulpd))] pub unsafe fn _mm256_mask_mul_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { let mul = _mm256_mul_pd(a, b).as_f64x4(); @@ -1562,6 +1693,7 @@ pub unsafe fn _mm256_mask_mul_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mul_pd&expand=3921) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vmulpd))] pub unsafe fn _mm256_maskz_mul_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { let mul = _mm256_mul_pd(a, b).as_f64x4(); @@ -1574,6 +1706,7 @@ pub unsafe fn _mm256_maskz_mul_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mul_pd&expand=3917) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulpd))] pub unsafe fn _mm_mask_mul_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let mul = _mm_mul_pd(a, b).as_f64x2(); @@ -1585,6 +1718,7 @@ pub unsafe fn _mm_mask_mul_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mul_pd&expand=3918) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulpd))] pub unsafe fn _mm_maskz_mul_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let mul = _mm_mul_pd(a, b).as_f64x2(); @@ -1597,6 +1731,7 @@ pub unsafe fn _mm_maskz_mul_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_div_ps&expand=2162) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivps))] pub unsafe fn _mm512_div_ps(a: __m512, b: __m512) -> __m512 { transmute(simd_div(a.as_f32x16(), b.as_f32x16())) @@ -1607,6 +1742,7 @@ pub unsafe fn _mm512_div_ps(a: __m512, b: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_div_ps&expand=2163) #[inline] #[target_feature(enable = "avx512f")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivps))] pub unsafe fn _mm512_mask_div_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { let div = _mm512_div_ps(a, b).as_f32x16(); @@ -1618,6 +1754,7 @@ pub unsafe fn _mm512_mask_div_ps(src: __m512, k: __mmask16, a: __m512, b: __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_div_ps&expand=2164) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivps))] pub unsafe fn _mm512_maskz_div_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { let div = _mm512_div_ps(a, b).as_f32x16(); @@ -1630,6 +1767,7 @@ pub unsafe fn _mm512_maskz_div_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_div_ps&expand=2160) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivps))] pub unsafe fn _mm256_mask_div_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { let div = _mm256_div_ps(a, b).as_f32x8(); @@ -1641,6 +1779,7 @@ pub unsafe fn _mm256_mask_div_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_div_ps&expand=2161) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivps))] pub unsafe fn _mm256_maskz_div_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { let div = _mm256_div_ps(a, b).as_f32x8(); @@ -1653,6 +1792,7 @@ pub unsafe fn _mm256_maskz_div_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_div_ps&expand=2157) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivps))] pub unsafe fn _mm_mask_div_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { let div = _mm_div_ps(a, b).as_f32x4(); @@ -1664,6 +1804,7 @@ pub unsafe fn _mm_mask_div_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_div_ps&expand=2158) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivps))] pub unsafe fn _mm_maskz_div_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { let div = _mm_div_ps(a, b).as_f32x4(); @@ -1676,6 +1817,7 @@ pub unsafe fn _mm_maskz_div_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_div_pd&expand=2153) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivpd))] pub unsafe fn _mm512_div_pd(a: __m512d, b: __m512d) -> __m512d { transmute(simd_div(a.as_f64x8(), b.as_f64x8())) @@ -1686,6 +1828,7 @@ pub unsafe fn _mm512_div_pd(a: __m512d, b: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_div_pd&expand=2154) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivpd))] pub unsafe fn _mm512_mask_div_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { let div = _mm512_div_pd(a, b).as_f64x8(); @@ -1697,6 +1840,7 @@ pub unsafe fn _mm512_mask_div_pd(src: 
__m512d, k: __mmask8, a: __m512d, b: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_div_pd&expand=2155) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivpd))] pub unsafe fn _mm512_maskz_div_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { let div = _mm512_div_pd(a, b).as_f64x8(); @@ -1709,6 +1853,7 @@ pub unsafe fn _mm512_maskz_div_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_div_pd&expand=2151) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivpd))] pub unsafe fn _mm256_mask_div_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { let div = _mm256_div_pd(a, b).as_f64x4(); @@ -1720,6 +1865,7 @@ pub unsafe fn _mm256_mask_div_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_div_pd&expand=2152) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivpd))] pub unsafe fn _mm256_maskz_div_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { let div = _mm256_div_pd(a, b).as_f64x4(); @@ -1732,6 +1878,7 @@ pub unsafe fn _mm256_maskz_div_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_div_pd&expand=2148) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivpd))] pub unsafe fn _mm_mask_div_pd(src: __m128d, k: __mmask8, a: __m128d, b: 
__m128d) -> __m128d { let div = _mm_div_pd(a, b).as_f64x2(); @@ -1743,6 +1890,7 @@ pub unsafe fn _mm_mask_div_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_div_pd&expand=2149) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivpd))] pub unsafe fn _mm_maskz_div_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let div = _mm_div_pd(a, b).as_f64x2(); @@ -1755,6 +1903,7 @@ pub unsafe fn _mm_maskz_div_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epi32&expand=3582) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsd))] pub unsafe fn _mm512_max_epi32(a: __m512i, b: __m512i) -> __m512i { transmute(vpmaxsd(a.as_i32x16(), b.as_i32x16())) @@ -1765,6 +1914,7 @@ pub unsafe fn _mm512_max_epi32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epi32&expand=3580) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsd))] pub unsafe fn _mm512_mask_max_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let max = _mm512_max_epi32(a, b).as_i32x16(); @@ -1776,6 +1926,7 @@ pub unsafe fn _mm512_mask_max_epi32(src: __m512i, k: __mmask16, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epi32&expand=3581) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpmaxsd))] pub unsafe fn _mm512_maskz_max_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let max = _mm512_max_epi32(a, b).as_i32x16(); @@ -1788,6 +1939,7 @@ pub unsafe fn _mm512_maskz_max_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epi32&expand=3577) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsd))] pub unsafe fn _mm256_mask_max_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let max = _mm256_max_epi32(a, b).as_i32x8(); @@ -1799,6 +1951,7 @@ pub unsafe fn _mm256_mask_max_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epi32&expand=3578) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsd))] pub unsafe fn _mm256_maskz_max_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let max = _mm256_max_epi32(a, b).as_i32x8(); @@ -1811,6 +1964,7 @@ pub unsafe fn _mm256_maskz_max_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epi32&expand=3574) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsd))] pub unsafe fn _mm_mask_max_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let max = _mm_max_epi32(a, b).as_i32x4(); @@ -1822,6 +1976,7 @@ pub unsafe fn _mm_mask_max_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epi32&expand=3575) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsd))] pub unsafe fn _mm_maskz_max_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let max = _mm_max_epi32(a, b).as_i32x4(); @@ -1834,6 +1989,7 @@ pub unsafe fn _mm_maskz_max_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epi64&expand=3591) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsq))] pub unsafe fn _mm512_max_epi64(a: __m512i, b: __m512i) -> __m512i { transmute(vpmaxsq(a.as_i64x8(), b.as_i64x8())) @@ -1844,6 +2000,7 @@ pub unsafe fn _mm512_max_epi64(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epi64&expand=3589) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsq))] pub unsafe fn _mm512_mask_max_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let max = _mm512_max_epi64(a, b).as_i64x8(); @@ -1855,6 +2012,7 @@ pub unsafe fn _mm512_mask_max_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epi64&expand=3590) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsq))] pub unsafe fn _mm512_maskz_max_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let max = _mm512_max_epi64(a, b).as_i64x8(); @@ -1867,6 +2025,7 @@ 
pub unsafe fn _mm512_maskz_max_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_max_epi64&expand=3588) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsq))] pub unsafe fn _mm256_max_epi64(a: __m256i, b: __m256i) -> __m256i { transmute(vpmaxsq256(a.as_i64x4(), b.as_i64x4())) @@ -1877,6 +2036,7 @@ pub unsafe fn _mm256_max_epi64(a: __m256i, b: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epi64&expand=3586) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsq))] pub unsafe fn _mm256_mask_max_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let max = _mm256_max_epi64(a, b).as_i64x4(); @@ -1888,6 +2048,7 @@ pub unsafe fn _mm256_mask_max_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epi64&expand=3587) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsq))] pub unsafe fn _mm256_maskz_max_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let max = _mm256_max_epi64(a, b).as_i64x4(); @@ -1900,6 +2061,7 @@ pub unsafe fn _mm256_maskz_max_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epi64&expand=3585) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsq))] pub unsafe fn _mm_max_epi64(a: 
__m128i, b: __m128i) -> __m128i { transmute(vpmaxsq128(a.as_i64x2(), b.as_i64x2())) @@ -1910,6 +2072,7 @@ pub unsafe fn _mm_max_epi64(a: __m128i, b: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epi64&expand=3583) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsq))] pub unsafe fn _mm_mask_max_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let max = _mm_max_epi64(a, b).as_i64x2(); @@ -1921,6 +2084,7 @@ pub unsafe fn _mm_mask_max_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epi64&expand=3584) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxsq))] pub unsafe fn _mm_maskz_max_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let max = _mm_max_epi64(a, b).as_i64x2(); @@ -1933,6 +2097,7 @@ pub unsafe fn _mm_maskz_max_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_ps&expand=3655) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxps))] pub unsafe fn _mm512_max_ps(a: __m512, b: __m512) -> __m512 { transmute(vmaxps( @@ -1947,6 +2112,7 @@ pub unsafe fn _mm512_max_ps(a: __m512, b: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_ps&expand=3653) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxps))] pub unsafe fn 
_mm512_mask_max_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { let max = _mm512_max_ps(a, b).as_f32x16(); @@ -1958,6 +2124,7 @@ pub unsafe fn _mm512_mask_max_ps(src: __m512, k: __mmask16, a: __m512, b: __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_ps&expand=3654) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxps))] pub unsafe fn _mm512_maskz_max_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { let max = _mm512_max_ps(a, b).as_f32x16(); @@ -1970,6 +2137,7 @@ pub unsafe fn _mm512_maskz_max_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_ps&expand=3650) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxps))] pub unsafe fn _mm256_mask_max_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { let max = _mm256_max_ps(a, b).as_f32x8(); @@ -1981,6 +2149,7 @@ pub unsafe fn _mm256_mask_max_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_ps&expand=3651) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxps))] pub unsafe fn _mm256_maskz_max_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { let max = _mm256_max_ps(a, b).as_f32x8(); @@ -1993,6 +2162,7 @@ pub unsafe fn _mm256_maskz_max_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_ps&expand=3647) #[inline] #[target_feature(enable = "avx512f,avx512vl")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxps))] pub unsafe fn _mm_mask_max_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { let max = _mm_max_ps(a, b).as_f32x4(); @@ -2004,6 +2174,7 @@ pub unsafe fn _mm_mask_max_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_ps&expand=3648) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxps))] pub unsafe fn _mm_maskz_max_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { let max = _mm_max_ps(a, b).as_f32x4(); @@ -2016,6 +2187,7 @@ pub unsafe fn _mm_maskz_max_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_pd&expand=3645) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxpd))] pub unsafe fn _mm512_max_pd(a: __m512d, b: __m512d) -> __m512d { transmute(vmaxpd(a.as_f64x8(), b.as_f64x8(), _MM_FROUND_CUR_DIRECTION)) @@ -2026,6 +2198,7 @@ pub unsafe fn _mm512_max_pd(a: __m512d, b: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_pd&expand=3643) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxpd))] pub unsafe fn _mm512_mask_max_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { let max = _mm512_max_pd(a, b).as_f64x8(); @@ -2037,6 +2210,7 @@ pub unsafe fn _mm512_mask_max_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m51 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_pd&expand=3644) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxpd))] pub unsafe fn _mm512_maskz_max_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { let max = _mm512_max_pd(a, b).as_f64x8(); @@ -2049,6 +2223,7 @@ pub unsafe fn _mm512_maskz_max_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_pd&expand=3640) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxpd))] pub unsafe fn _mm256_mask_max_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { let max = _mm256_max_pd(a, b).as_f64x4(); @@ -2060,6 +2235,7 @@ pub unsafe fn _mm256_mask_max_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_pd&expand=3641) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxpd))] pub unsafe fn _mm256_maskz_max_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { let max = _mm256_max_pd(a, b).as_f64x4(); @@ -2072,6 +2248,7 @@ pub unsafe fn _mm256_maskz_max_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_pd&expand=3637) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxpd))] pub unsafe fn _mm_mask_max_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let max = _mm_max_pd(a, 
b).as_f64x2(); @@ -2083,6 +2260,7 @@ pub unsafe fn _mm_mask_max_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_pd&expand=3638) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxpd))] pub unsafe fn _mm_maskz_max_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let max = _mm_max_pd(a, b).as_f64x2(); @@ -2095,6 +2273,7 @@ pub unsafe fn _mm_maskz_max_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epu32&expand=3618) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxud))] pub unsafe fn _mm512_max_epu32(a: __m512i, b: __m512i) -> __m512i { transmute(vpmaxud(a.as_u32x16(), b.as_u32x16())) @@ -2105,6 +2284,7 @@ pub unsafe fn _mm512_max_epu32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epu32&expand=3616) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxud))] pub unsafe fn _mm512_mask_max_epu32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let max = _mm512_max_epu32(a, b).as_u32x16(); @@ -2116,6 +2296,7 @@ pub unsafe fn _mm512_mask_max_epu32(src: __m512i, k: __mmask16, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epu32&expand=3617) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxud))] pub unsafe fn 
_mm512_maskz_max_epu32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let max = _mm512_max_epu32(a, b).as_u32x16(); @@ -2128,6 +2309,7 @@ pub unsafe fn _mm512_maskz_max_epu32(k: __mmask16, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epu32&expand=3613) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxud))] pub unsafe fn _mm256_mask_max_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let max = _mm256_max_epu32(a, b).as_u32x8(); @@ -2139,6 +2321,7 @@ pub unsafe fn _mm256_mask_max_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epu32&expand=3614) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxud))] pub unsafe fn _mm256_maskz_max_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let max = _mm256_max_epu32(a, b).as_u32x8(); @@ -2151,6 +2334,7 @@ pub unsafe fn _mm256_maskz_max_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epu32&expand=3610) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxud))] pub unsafe fn _mm_mask_max_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let max = _mm_max_epu32(a, b).as_u32x4(); @@ -2162,6 +2346,7 @@ pub unsafe fn _mm_mask_max_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epu32&expand=3611) 
#[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxud))] pub unsafe fn _mm_maskz_max_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let max = _mm_max_epu32(a, b).as_u32x4(); @@ -2174,6 +2359,7 @@ pub unsafe fn _mm_maskz_max_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epu64&expand=3627) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxuq))] pub unsafe fn _mm512_max_epu64(a: __m512i, b: __m512i) -> __m512i { transmute(vpmaxuq(a.as_u64x8(), b.as_u64x8())) @@ -2184,6 +2370,7 @@ pub unsafe fn _mm512_max_epu64(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epu64&expand=3625) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxuq))] pub unsafe fn _mm512_mask_max_epu64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let max = _mm512_max_epu64(a, b).as_u64x8(); @@ -2195,6 +2382,7 @@ pub unsafe fn _mm512_mask_max_epu64(src: __m512i, k: __mmask8, a: __m512i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epu&expand=3626) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxuq))] pub unsafe fn _mm512_maskz_max_epu64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let max = _mm512_max_epu64(a, b).as_u64x8(); @@ -2207,6 +2395,7 @@ pub unsafe fn _mm512_maskz_max_epu64(k: __mmask8, a: __m512i, b: __m512i) -> __m /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_max_epu64&expand=3624) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxuq))] pub unsafe fn _mm256_max_epu64(a: __m256i, b: __m256i) -> __m256i { transmute(vpmaxuq256(a.as_u64x4(), b.as_u64x4())) @@ -2217,6 +2406,7 @@ pub unsafe fn _mm256_max_epu64(a: __m256i, b: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epu64&expand=3622) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxuq))] pub unsafe fn _mm256_mask_max_epu64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let max = _mm256_max_epu64(a, b).as_u64x4(); @@ -2228,6 +2418,7 @@ pub unsafe fn _mm256_mask_max_epu64(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epu64&expand=3623) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxuq))] pub unsafe fn _mm256_maskz_max_epu64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let max = _mm256_max_epu64(a, b).as_u64x4(); @@ -2240,6 +2431,7 @@ pub unsafe fn _mm256_maskz_max_epu64(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epu64&expand=3621) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxuq))] pub unsafe fn _mm_max_epu64(a: __m128i, b: __m128i) -> __m128i { transmute(vpmaxuq128(a.as_u64x2(), b.as_u64x2())) @@ -2250,6 
+2442,7 @@ pub unsafe fn _mm_max_epu64(a: __m128i, b: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epu64&expand=3619) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxuq))] pub unsafe fn _mm_mask_max_epu64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let max = _mm_max_epu64(a, b).as_u64x2(); @@ -2261,6 +2454,7 @@ pub unsafe fn _mm_mask_max_epu64(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epu64&expand=3620) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmaxuq))] pub unsafe fn _mm_maskz_max_epu64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let max = _mm_max_epu64(a, b).as_u64x2(); @@ -2273,6 +2467,7 @@ pub unsafe fn _mm_maskz_max_epu64(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_epi32&expand=3696) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsd))] pub unsafe fn _mm512_min_epi32(a: __m512i, b: __m512i) -> __m512i { transmute(vpminsd(a.as_i32x16(), b.as_i32x16())) @@ -2283,6 +2478,7 @@ pub unsafe fn _mm512_min_epi32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_epi32&expand=3694) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsd))] pub unsafe fn _mm512_mask_min_epi32(src: __m512i, k: __mmask16, 
a: __m512i, b: __m512i) -> __m512i { let min = _mm512_min_epi32(a, b).as_i32x16(); @@ -2294,6 +2490,7 @@ pub unsafe fn _mm512_mask_min_epi32(src: __m512i, k: __mmask16, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_epi32&expand=3695) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsd))] pub unsafe fn _mm512_maskz_min_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let min = _mm512_min_epi32(a, b).as_i32x16(); @@ -2306,6 +2503,7 @@ pub unsafe fn _mm512_maskz_min_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_epi32&expand=3691) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsd))] pub unsafe fn _mm256_mask_min_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let min = _mm256_min_epi32(a, b).as_i32x8(); @@ -2317,6 +2515,7 @@ pub unsafe fn _mm256_mask_min_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_epi32&expand=3692) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsd))] pub unsafe fn _mm256_maskz_min_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let min = _mm256_min_epi32(a, b).as_i32x8(); @@ -2329,6 +2528,7 @@ pub unsafe fn _mm256_maskz_min_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_epi32&expand=3688) #[inline] #[target_feature(enable = "avx512f,avx512vl")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsd))] pub unsafe fn _mm_mask_min_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let min = _mm_min_epi32(a, b).as_i32x4(); @@ -2340,6 +2540,7 @@ pub unsafe fn _mm_mask_min_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_epi32&expand=3689) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsd))] pub unsafe fn _mm_maskz_min_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let min = _mm_min_epi32(a, b).as_i32x4(); @@ -2352,6 +2553,7 @@ pub unsafe fn _mm_maskz_min_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_epi64&expand=3705) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsq))] pub unsafe fn _mm512_min_epi64(a: __m512i, b: __m512i) -> __m512i { transmute(vpminsq(a.as_i64x8(), b.as_i64x8())) @@ -2362,6 +2564,7 @@ pub unsafe fn _mm512_min_epi64(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_epi64&expand=3703) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsq))] pub unsafe fn _mm512_mask_min_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let min = _mm512_min_epi64(a, b).as_i64x8(); @@ -2373,6 +2576,7 @@ pub unsafe fn _mm512_mask_min_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_epi64&expand=3704) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsq))] pub unsafe fn _mm512_maskz_min_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let min = _mm512_min_epi64(a, b).as_i64x8(); @@ -2385,6 +2589,7 @@ pub unsafe fn _mm512_maskz_min_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_min_epi64&expand=3702) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsq))] pub unsafe fn _mm256_min_epi64(a: __m256i, b: __m256i) -> __m256i { transmute(vpminsq256(a.as_i64x4(), b.as_i64x4())) @@ -2395,6 +2600,7 @@ pub unsafe fn _mm256_min_epi64(a: __m256i, b: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_epi64&expand=3700) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsq))] pub unsafe fn _mm256_mask_min_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let min = _mm256_min_epi64(a, b).as_i64x4(); @@ -2406,6 +2612,7 @@ pub unsafe fn _mm256_mask_min_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_epi64&expand=3701) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminsq))] pub unsafe fn _mm256_maskz_min_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let min = _mm256_min_epi64(a,
b).as_i64x4(); @@ -2418,6 +2625,7 @@ pub unsafe fn _mm256_maskz_min_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_ps&expand=3769) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminps))] pub unsafe fn _mm512_min_ps(a: __m512, b: __m512) -> __m512 { transmute(vminps( @@ -2432,6 +2640,7 @@ pub unsafe fn _mm512_min_ps(a: __m512, b: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_ps&expand=3767) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminps))] pub unsafe fn _mm512_mask_min_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { let min = _mm512_min_ps(a, b).as_f32x16(); @@ -2443,6 +2652,7 @@ pub unsafe fn _mm512_mask_min_ps(src: __m512, k: __mmask16, a: __m512, b: __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_ps&expand=3768) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminps))] pub unsafe fn _mm512_maskz_min_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { let min = _mm512_min_ps(a, b).as_f32x16(); @@ -2455,6 +2665,7 @@ pub unsafe fn _mm512_maskz_min_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_ps&expand=3764) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminps))] pub unsafe fn _mm256_mask_min_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> 
__m256 { let min = _mm256_min_ps(a, b).as_f32x8(); @@ -2466,6 +2677,7 @@ pub unsafe fn _mm256_mask_min_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_ps&expand=3765) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminps))] pub unsafe fn _mm256_maskz_min_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { let min = _mm256_min_ps(a, b).as_f32x8(); @@ -2478,6 +2690,7 @@ pub unsafe fn _mm256_maskz_min_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_ps&expand=3761) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminps))] pub unsafe fn _mm_mask_min_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { let min = _mm_min_ps(a, b).as_f32x4(); @@ -2489,6 +2702,7 @@ pub unsafe fn _mm_mask_min_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_ps&expand=3762) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminps))] pub unsafe fn _mm_maskz_min_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { let min = _mm_min_ps(a, b).as_f32x4(); @@ -2503,6 +2717,7 @@ pub unsafe fn _mm_maskz_min_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_pd&expand=3759) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminpd))] 
pub unsafe fn _mm512_min_pd(a: __m512d, b: __m512d) -> __m512d { transmute(vminpd(a.as_f64x8(), b.as_f64x8(), _MM_FROUND_CUR_DIRECTION)) @@ -2513,6 +2728,7 @@ pub unsafe fn _mm512_min_pd(a: __m512d, b: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_pd&expand=3757) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminpd))] pub unsafe fn _mm512_mask_min_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { let min = _mm512_min_pd(a, b).as_f64x8(); @@ -2524,6 +2740,7 @@ pub unsafe fn _mm512_mask_min_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_pd&expand=3758) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminpd))] pub unsafe fn _mm512_maskz_min_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { let min = _mm512_min_pd(a, b).as_f64x8(); @@ -2536,6 +2753,7 @@ pub unsafe fn _mm512_maskz_min_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_pd&expand=3754) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminpd))] pub unsafe fn _mm256_mask_min_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { let min = _mm256_min_pd(a, b).as_f64x4(); @@ -2547,6 +2765,7 @@ pub unsafe fn _mm256_mask_min_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_pd&expand=3755) #[inline] #[target_feature(enable = 
"avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminpd))] pub unsafe fn _mm256_maskz_min_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { let min = _mm256_min_pd(a, b).as_f64x4(); @@ -2559,6 +2778,7 @@ pub unsafe fn _mm256_maskz_min_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_pd&expand=3751) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminpd))] pub unsafe fn _mm_mask_min_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let min = _mm_min_pd(a, b).as_f64x2(); @@ -2570,6 +2790,7 @@ pub unsafe fn _mm_mask_min_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_pd&expand=3752) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminpd))] pub unsafe fn _mm_maskz_min_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let min = _mm_min_pd(a, b).as_f64x2(); @@ -2582,6 +2803,7 @@ pub unsafe fn _mm_maskz_min_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_epu32&expand=3732) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminud))] pub unsafe fn _mm512_min_epu32(a: __m512i, b: __m512i) -> __m512i { transmute(vpminud(a.as_u32x16(), b.as_u32x16())) @@ -2592,6 +2814,7 @@ pub unsafe fn _mm512_min_epu32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_epu32&expand=3730) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminud))] pub unsafe fn _mm512_mask_min_epu32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let min = _mm512_min_epu32(a, b).as_u32x16(); @@ -2603,6 +2826,7 @@ pub unsafe fn _mm512_mask_min_epu32(src: __m512i, k: __mmask16, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_epu32&expand=3731) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminud))] pub unsafe fn _mm512_maskz_min_epu32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let min = _mm512_min_epu32(a, b).as_u32x16(); @@ -2615,6 +2839,7 @@ pub unsafe fn _mm512_maskz_min_epu32(k: __mmask16, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_epu32&expand=3727) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminud))] pub unsafe fn _mm256_mask_min_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let min = _mm256_min_epu32(a, b).as_u32x8(); @@ -2626,6 +2851,7 @@ pub unsafe fn _mm256_mask_min_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_epu32&expand=3728) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminud))] pub unsafe fn _mm256_maskz_min_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i 
{ let min = _mm256_min_epu32(a, b).as_u32x8(); @@ -2638,6 +2864,7 @@ pub unsafe fn _mm256_maskz_min_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_epu32&expand=3724) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminud))] pub unsafe fn _mm_mask_min_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let min = _mm_min_epu32(a, b).as_u32x4(); @@ -2649,6 +2876,7 @@ pub unsafe fn _mm_mask_min_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_epu32&expand=3725) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminud))] pub unsafe fn _mm_maskz_min_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let min = _mm_min_epu32(a, b).as_u32x4(); @@ -2661,6 +2889,7 @@ pub unsafe fn _mm_maskz_min_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_epu64&expand=3741) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminuq))] pub unsafe fn _mm512_min_epu64(a: __m512i, b: __m512i) -> __m512i { transmute(vpminuq(a.as_u64x8(), b.as_u64x8())) @@ -2671,6 +2900,7 @@ pub unsafe fn _mm512_min_epu64(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_epu64&expand=3739) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpminuq))] pub unsafe fn _mm512_mask_min_epu64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let min = _mm512_min_epu64(a, b).as_u64x8(); @@ -2682,6 +2912,7 @@ pub unsafe fn _mm512_mask_min_epu64(src: __m512i, k: __mmask8, a: __m512i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_epu64&expand=3740) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminuq))] pub unsafe fn _mm512_maskz_min_epu64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let min = _mm512_min_epu64(a, b).as_u64x8(); @@ -2694,6 +2925,7 @@ pub unsafe fn _mm512_maskz_min_epu64(k: __mmask8, a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_min_epu64&expand=3738) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminuq))] pub unsafe fn _mm256_min_epu64(a: __m256i, b: __m256i) -> __m256i { transmute(vpminuq256(a.as_u64x4(), b.as_u64x4())) @@ -2704,6 +2936,7 @@ pub unsafe fn _mm256_min_epu64(a: __m256i, b: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_epu64&expand=3736) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminuq))] pub unsafe fn _mm256_mask_min_epu64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let min = _mm256_min_epu64(a, b).as_u64x4(); @@ -2715,6 +2948,7 @@ pub unsafe fn _mm256_mask_min_epu64(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_epu64&expand=3737) 
#[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminuq))] pub unsafe fn _mm256_maskz_min_epu64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let min = _mm256_min_epu64(a, b).as_u64x4(); @@ -2727,6 +2961,7 @@ pub unsafe fn _mm256_maskz_min_epu64(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epu64&expand=3735) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminuq))] pub unsafe fn _mm_min_epu64(a: __m128i, b: __m128i) -> __m128i { transmute(vpminuq128(a.as_u64x2(), b.as_u64x2())) @@ -2737,6 +2972,7 @@ pub unsafe fn _mm_min_epu64(a: __m128i, b: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_epu64&expand=3733) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminuq))] pub unsafe fn _mm_mask_min_epu64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let min = _mm_min_epu64(a, b).as_u64x2(); @@ -2748,6 +2984,7 @@ pub unsafe fn _mm_mask_min_epu64(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_epu64&expand=3734) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpminuq))] pub unsafe fn _mm_maskz_min_epu64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let min = _mm_min_epu64(a, b).as_u64x2(); @@ -2760,6 +2997,7 @@ pub unsafe fn _mm_maskz_min_epu64(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sqrt_ps&expand=5371) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtps))] pub unsafe fn _mm512_sqrt_ps(a: __m512) -> __m512 { transmute(vsqrtps(a.as_f32x16(), _MM_FROUND_CUR_DIRECTION)) @@ -2770,6 +3008,7 @@ pub unsafe fn _mm512_sqrt_ps(a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sqrt_ps&expand=5369) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtps))] pub unsafe fn _mm512_mask_sqrt_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { let sqrt = _mm512_sqrt_ps(a).as_f32x16(); @@ -2781,6 +3020,7 @@ pub unsafe fn _mm512_mask_sqrt_ps(src: __m512, k: __mmask16, a: __m512) -> __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sqrt_ps&expand=5370) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtps))] pub unsafe fn _mm512_maskz_sqrt_ps(k: __mmask16, a: __m512) -> __m512 { let sqrt = _mm512_sqrt_ps(a).as_f32x16(); @@ -2793,6 +3033,7 @@ pub unsafe fn _mm512_maskz_sqrt_ps(k: __mmask16, a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sqrt_ps&expand=5366) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtps))] pub unsafe fn _mm256_mask_sqrt_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { let sqrt = _mm256_sqrt_ps(a).as_f32x8(); @@ -2804,6 +3045,7 @@ pub unsafe fn _mm256_mask_sqrt_ps(src: __m256, k: __mmask8, a: __m256) -> 
__m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sqrt_ps&expand=5367) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtps))] pub unsafe fn _mm256_maskz_sqrt_ps(k: __mmask8, a: __m256) -> __m256 { let sqrt = _mm256_sqrt_ps(a).as_f32x8(); @@ -2816,6 +3058,7 @@ pub unsafe fn _mm256_maskz_sqrt_ps(k: __mmask8, a: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sqrt_ps&expand=5363) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtps))] pub unsafe fn _mm_mask_sqrt_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { let sqrt = _mm_sqrt_ps(a).as_f32x4(); @@ -2827,6 +3070,7 @@ pub unsafe fn _mm_mask_sqrt_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sqrt_ps&expand=5364) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtps))] pub unsafe fn _mm_maskz_sqrt_ps(k: __mmask8, a: __m128) -> __m128 { let sqrt = _mm_sqrt_ps(a).as_f32x4(); @@ -2839,6 +3083,7 @@ pub unsafe fn _mm_maskz_sqrt_ps(k: __mmask8, a: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sqrt_pd&expand=5362) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtpd))] pub unsafe fn _mm512_sqrt_pd(a: __m512d) -> __m512d { transmute(vsqrtpd(a.as_f64x8(), _MM_FROUND_CUR_DIRECTION)) @@ -2849,6 +3094,7 @@ pub unsafe fn _mm512_sqrt_pd(a: __m512d) -> __m512d { 
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sqrt_pd&expand=5360) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtpd))] pub unsafe fn _mm512_mask_sqrt_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { let sqrt = _mm512_sqrt_pd(a).as_f64x8(); @@ -2860,6 +3106,7 @@ pub unsafe fn _mm512_mask_sqrt_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sqrt_pd&expand=5361) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtpd))] pub unsafe fn _mm512_maskz_sqrt_pd(k: __mmask8, a: __m512d) -> __m512d { let sqrt = _mm512_sqrt_pd(a).as_f64x8(); @@ -2872,6 +3119,7 @@ pub unsafe fn _mm512_maskz_sqrt_pd(k: __mmask8, a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sqrt_pd&expand=5357) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtpd))] pub unsafe fn _mm256_mask_sqrt_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { let sqrt = _mm256_sqrt_pd(a).as_f64x4(); @@ -2883,6 +3131,7 @@ pub unsafe fn _mm256_mask_sqrt_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sqrt_pd&expand=5358) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtpd))] pub unsafe fn _mm256_maskz_sqrt_pd(k: __mmask8, a: __m256d) -> __m256d { let sqrt = _mm256_sqrt_pd(a).as_f64x4(); @@ -2895,6 +3144,7 @@ pub unsafe 
fn _mm256_maskz_sqrt_pd(k: __mmask8, a: __m256d) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sqrt_pd&expand=5354) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtpd))] pub unsafe fn _mm_mask_sqrt_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d { let sqrt = _mm_sqrt_pd(a).as_f64x2(); @@ -2906,6 +3156,7 @@ pub unsafe fn _mm_mask_sqrt_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sqrt_pd&expand=5355) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtpd))] pub unsafe fn _mm_maskz_sqrt_pd(k: __mmask8, a: __m128d) -> __m128d { let sqrt = _mm_sqrt_pd(a).as_f64x2(); @@ -2918,6 +3169,7 @@ pub unsafe fn _mm_maskz_sqrt_pd(k: __mmask8, a: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmadd_ps&expand=2557) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps pub unsafe fn _mm512_fmadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 { transmute(vfmadd132ps(a.as_f32x16(), b.as_f32x16(), c.as_f32x16())) @@ -2928,6 +3180,7 @@ pub unsafe fn _mm512_fmadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmadd_ps&expand=2558) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps pub unsafe fn 
_mm512_mask_fmadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { let fmadd = _mm512_fmadd_ps(a, b, c).as_f32x16(); @@ -2939,6 +3192,7 @@ pub unsafe fn _mm512_mask_fmadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmadd_ps&expand=2560) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps pub unsafe fn _mm512_maskz_fmadd_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { let fmadd = _mm512_fmadd_ps(a, b, c).as_f32x16(); @@ -2951,6 +3205,7 @@ pub unsafe fn _mm512_maskz_fmadd_ps(k: __mmask16, a: __m512, b: __m512, c: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmadd_ps&expand=2559) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps pub unsafe fn _mm512_mask3_fmadd_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { let fmadd = _mm512_fmadd_ps(a, b, c).as_f32x16(); @@ -2962,6 +3217,7 @@ pub unsafe fn _mm512_mask3_fmadd_ps(a: __m512, b: __m512, c: __m512, k: __mmask1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fmadd_ps&expand=2554) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps pub unsafe fn _mm256_mask_fmadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { let fmadd = _mm256_fmadd_ps(a, b, c).as_f32x8(); @@ -2973,6 +3229,7 @@ pub unsafe fn _mm256_mask_fmadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fmadd_ps&expand=2556) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps pub unsafe fn _mm256_maskz_fmadd_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { let fmadd = _mm256_fmadd_ps(a, b, c).as_f32x8(); @@ -2985,6 +3242,7 @@ pub unsafe fn _mm256_maskz_fmadd_ps(k: __mmask8, a: __m256, b: __m256, c: __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fmadd_ps&expand=2555) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps pub unsafe fn _mm256_mask3_fmadd_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { let fmadd = _mm256_fmadd_ps(a, b, c).as_f32x8(); @@ -2996,6 +3254,7 @@ pub unsafe fn _mm256_mask3_fmadd_ps(a: __m256, b: __m256, c: __m256, k: __mmask8 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fmadd_ps&expand=2550) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps pub unsafe fn _mm_mask_fmadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { let fmadd = _mm_fmadd_ps(a, b, c).as_f32x4(); @@ -3007,6 +3266,7 @@ pub unsafe fn _mm_mask_fmadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fmadd_ps&expand=2552) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps pub unsafe fn _mm_maskz_fmadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { let fmadd = _mm_fmadd_ps(a, b, c).as_f32x4(); @@ -3019,6 +3279,7 @@ pub unsafe fn _mm_maskz_fmadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fmadd_ps&expand=2551) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps pub unsafe fn _mm_mask3_fmadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { let fmadd = _mm_fmadd_ps(a, b, c).as_f32x4(); @@ -3030,6 +3291,7 @@ pub unsafe fn _mm_mask3_fmadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmadd_pd&expand=2545) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd pub unsafe fn _mm512_fmadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { transmute(vfmadd132pd(a.as_f64x8(), b.as_f64x8(), c.as_f64x8())) @@ -3040,6 +3302,7 @@ pub unsafe fn _mm512_fmadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmadd_pd&expand=2546) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd pub unsafe fn _mm512_mask_fmadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { let fmadd = _mm512_fmadd_pd(a, b, c).as_f64x8(); @@ -3051,6 +3314,7 
@@ pub unsafe fn _mm512_mask_fmadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmadd_pd&expand=2548) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd pub unsafe fn _mm512_maskz_fmadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { let fmadd = _mm512_fmadd_pd(a, b, c).as_f64x8(); @@ -3063,6 +3327,7 @@ pub unsafe fn _mm512_maskz_fmadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmadd_pd&expand=2547) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd pub unsafe fn _mm512_mask3_fmadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { let fmadd = _mm512_fmadd_pd(a, b, c).as_f64x8(); @@ -3074,6 +3339,7 @@ pub unsafe fn _mm512_mask3_fmadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __mma /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fmadd_pd&expand=2542) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd pub unsafe fn _mm256_mask_fmadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { let fmadd = _mm256_fmadd_pd(a, b, c).as_f64x4(); @@ -3085,6 +3351,7 @@ pub unsafe fn _mm256_mask_fmadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fmadd_pd&expand=2544) #[inline] 
#[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd pub unsafe fn _mm256_maskz_fmadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { let fmadd = _mm256_fmadd_pd(a, b, c).as_f64x4(); @@ -3097,6 +3364,7 @@ pub unsafe fn _mm256_maskz_fmadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fmadd_pd&expand=2543) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd pub unsafe fn _mm256_mask3_fmadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { let fmadd = _mm256_fmadd_pd(a, b, c).as_f64x4(); @@ -3108,6 +3376,7 @@ pub unsafe fn _mm256_mask3_fmadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __mma /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fmadd_pd&expand=2538) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd pub unsafe fn _mm_mask_fmadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { let fmadd = _mm_fmadd_pd(a, b, c).as_f64x2(); @@ -3119,6 +3388,7 @@ pub unsafe fn _mm_mask_fmadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fmadd_pd&expand=2540) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd pub unsafe fn 
_mm_maskz_fmadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { let fmadd = _mm_fmadd_pd(a, b, c).as_f64x2(); @@ -3131,6 +3401,7 @@ pub unsafe fn _mm_maskz_fmadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fmadd_pd&expand=2539) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd pub unsafe fn _mm_mask3_fmadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { let fmadd = _mm_fmadd_pd(a, b, c).as_f64x2(); @@ -3142,6 +3413,7 @@ pub unsafe fn _mm_mask3_fmadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmsub_ps&expand=2643) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub pub unsafe fn _mm512_fmsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 { let zero: f32x16 = mem::zeroed(); @@ -3154,6 +3426,7 @@ pub unsafe fn _mm512_fmsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmsub_ps&expand=2644) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub pub unsafe fn _mm512_mask_fmsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { let fmsub = _mm512_fmsub_ps(a, b, c).as_f32x16(); @@ -3165,6 +3438,7 @@ pub unsafe fn _mm512_mask_fmsub_ps(a: __m512, k: 
__mmask16, b: __m512, c: __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmsub_ps&expand=2646) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub pub unsafe fn _mm512_maskz_fmsub_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { let fmsub = _mm512_fmsub_ps(a, b, c).as_f32x16(); @@ -3177,6 +3451,7 @@ pub unsafe fn _mm512_maskz_fmsub_ps(k: __mmask16, a: __m512, b: __m512, c: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmsub_ps&expand=2645) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub pub unsafe fn _mm512_mask3_fmsub_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { let fmsub = _mm512_fmsub_ps(a, b, c).as_f32x16(); @@ -3188,6 +3463,7 @@ pub unsafe fn _mm512_mask3_fmsub_ps(a: __m512, b: __m512, c: __m512, k: __mmask1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fmsub_ps&expand=2640) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub pub unsafe fn _mm256_mask_fmsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { let fmsub = _mm256_fmsub_ps(a, b, c).as_f32x8(); @@ -3199,6 +3475,7 @@ pub unsafe fn _mm256_mask_fmsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fmsub_ps&expand=2642) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub pub unsafe fn _mm256_maskz_fmsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { let fmsub = _mm256_fmsub_ps(a, b, c).as_f32x8(); @@ -3211,6 +3488,7 @@ pub unsafe fn _mm256_maskz_fmsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fmsub_ps&expand=2641) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub pub unsafe fn _mm256_mask3_fmsub_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { let fmsub = _mm256_fmsub_ps(a, b, c).as_f32x8(); @@ -3222,6 +3500,7 @@ pub unsafe fn _mm256_mask3_fmsub_ps(a: __m256, b: __m256, c: __m256, k: __mmask8 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fmsub_ps&expand=2636) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub pub unsafe fn _mm_mask_fmsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { let fmsub = _mm_fmsub_ps(a, b, c).as_f32x4(); @@ -3233,6 +3512,7 @@ pub unsafe fn _mm_mask_fmsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fmsub_ps&expand=2638) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub pub unsafe fn _mm_maskz_fmsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { let fmsub = _mm_fmsub_ps(a, b, c).as_f32x4(); @@ -3245,6 +3525,7 @@ pub unsafe fn _mm_maskz_fmsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fmsub_ps&expand=2637) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generate vfmadd, gcc generate vfmsub pub unsafe fn _mm_mask3_fmsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { let fmsub = _mm_fmsub_ps(a, b, c).as_f32x4(); @@ -3256,6 +3537,7 @@ pub unsafe fn _mm_mask3_fmsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmsub_pd&expand=2631) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. 
clang fmadd, gcc fmsub pub unsafe fn _mm512_fmsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { let zero: f64x8 = mem::zeroed(); @@ -3268,6 +3550,7 @@ pub unsafe fn _mm512_fmsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmsub_pd&expand=2632) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub pub unsafe fn _mm512_mask_fmsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { let fmsub = _mm512_fmsub_pd(a, b, c).as_f64x8(); @@ -3279,6 +3562,7 @@ pub unsafe fn _mm512_mask_fmsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmsub_pd&expand=2634) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub pub unsafe fn _mm512_maskz_fmsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { let fmsub = _mm512_fmsub_pd(a, b, c).as_f64x8(); @@ -3291,6 +3575,7 @@ pub unsafe fn _mm512_maskz_fmsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmsub_pd&expand=2633) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. 
clang fmadd, gcc fmsub pub unsafe fn _mm512_mask3_fmsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { let fmsub = _mm512_fmsub_pd(a, b, c).as_f64x8(); @@ -3302,6 +3587,7 @@ pub unsafe fn _mm512_mask3_fmsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __mma /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fmsub_pd&expand=2628) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub pub unsafe fn _mm256_mask_fmsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { let fmsub = _mm256_fmsub_pd(a, b, c).as_f64x4(); @@ -3313,6 +3599,7 @@ pub unsafe fn _mm256_mask_fmsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fmsub_pd&expand=2630) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub pub unsafe fn _mm256_maskz_fmsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { let fmsub = _mm256_fmsub_pd(a, b, c).as_f64x4(); @@ -3325,6 +3612,7 @@ pub unsafe fn _mm256_maskz_fmsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fmsub_pd&expand=2629) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. 
clang fmadd, gcc fmsub pub unsafe fn _mm256_mask3_fmsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { let fmsub = _mm256_fmsub_pd(a, b, c).as_f64x4(); @@ -3336,6 +3624,7 @@ pub unsafe fn _mm256_mask3_fmsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __mma /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fmsub_pd&expand=2624) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub pub unsafe fn _mm_mask_fmsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { let fmsub = _mm_fmsub_pd(a, b, c).as_f64x2(); @@ -3347,6 +3636,7 @@ pub unsafe fn _mm_mask_fmsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fmsub_pd&expand=2626) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub pub unsafe fn _mm_maskz_fmsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { let fmsub = _mm_fmsub_pd(a, b, c).as_f64x2(); @@ -3359,6 +3649,7 @@ pub unsafe fn _mm_maskz_fmsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fmsub_pd&expand=2625) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. 
clang fmadd, gcc fmsub pub unsafe fn _mm_mask3_fmsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { let fmsub = _mm_fmsub_pd(a, b, c).as_f64x2(); @@ -3370,6 +3661,7 @@ pub unsafe fn _mm_mask3_fmsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmaddsub_ps&expand=2611) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps pub unsafe fn _mm512_fmaddsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 { transmute(vfmaddsub213ps( @@ -3385,6 +3677,7 @@ pub unsafe fn _mm512_fmaddsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmaddsub_ps&expand=2612) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps pub unsafe fn _mm512_mask_fmaddsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { let fmaddsub = _mm512_fmaddsub_ps(a, b, c).as_f32x16(); @@ -3396,6 +3689,7 @@ pub unsafe fn _mm512_mask_fmaddsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmaddsub_ps&expand=2614) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps pub unsafe fn _mm512_maskz_fmaddsub_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { let fmaddsub = _mm512_fmaddsub_ps(a, b, c).as_f32x16(); @@ -3408,6 +3702,7 @@ pub unsafe fn _mm512_maskz_fmaddsub_ps(k: 
__mmask16, a: __m512, b: __m512, c: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmaddsub_ps&expand=2613) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps pub unsafe fn _mm512_mask3_fmaddsub_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { let fmaddsub = _mm512_fmaddsub_ps(a, b, c).as_f32x16(); @@ -3419,6 +3714,7 @@ pub unsafe fn _mm512_mask3_fmaddsub_ps(a: __m512, b: __m512, c: __m512, k: __mma /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fmaddsub_ps&expand=2608) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps pub unsafe fn _mm256_mask_fmaddsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { let fmaddsub = _mm256_fmaddsub_ps(a, b, c).as_f32x8(); @@ -3430,6 +3726,7 @@ pub unsafe fn _mm256_mask_fmaddsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fmaddsub_ps&expand=2610) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps pub unsafe fn _mm256_maskz_fmaddsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { let fmaddsub = _mm256_fmaddsub_ps(a, b, c).as_f32x8(); @@ -3442,6 +3739,7 @@ pub unsafe fn _mm256_maskz_fmaddsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fmaddsub_ps&expand=2609) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps pub unsafe fn _mm256_mask3_fmaddsub_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { let fmaddsub = _mm256_fmaddsub_ps(a, b, c).as_f32x8(); @@ -3453,6 +3751,7 @@ pub unsafe fn _mm256_mask3_fmaddsub_ps(a: __m256, b: __m256, c: __m256, k: __mma /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fmaddsub_ps&expand=2604) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps pub unsafe fn _mm_mask_fmaddsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { let fmaddsub = _mm_fmaddsub_ps(a, b, c).as_f32x4(); @@ -3464,6 +3763,7 @@ pub unsafe fn _mm_mask_fmaddsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/IntrinsicsGuide/#text=_mm_maskz_fmaddsub_ps&expand=2606) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps pub unsafe fn _mm_maskz_fmaddsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { let fmaddsub = _mm_fmaddsub_ps(a, b, c).as_f32x4(); @@ -3476,6 +3776,7 @@ pub unsafe fn _mm_maskz_fmaddsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fmaddsub_ps&expand=2605) #[inline] #[target_feature(enable = 
"avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps pub unsafe fn _mm_mask3_fmaddsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { let fmaddsub = _mm_fmaddsub_ps(a, b, c).as_f32x4(); @@ -3487,6 +3788,7 @@ pub unsafe fn _mm_mask3_fmaddsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmaddsub_pd&expand=2599) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd pub unsafe fn _mm512_fmaddsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { transmute(vfmaddsub213pd( @@ -3502,6 +3804,7 @@ pub unsafe fn _mm512_fmaddsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmaddsub_pd&expand=2600) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd pub unsafe fn _mm512_mask_fmaddsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { let fmaddsub = _mm512_fmaddsub_pd(a, b, c).as_f64x8(); @@ -3513,6 +3816,7 @@ pub unsafe fn _mm512_mask_fmaddsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmaddsub_pd&expand=2602) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd pub unsafe fn _mm512_maskz_fmaddsub_pd(k: __mmask8, a: 
__m512d, b: __m512d, c: __m512d) -> __m512d { let fmaddsub = _mm512_fmaddsub_pd(a, b, c).as_f64x8(); @@ -3525,6 +3829,7 @@ pub unsafe fn _mm512_maskz_fmaddsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmaddsub_ps&expand=2613) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd pub unsafe fn _mm512_mask3_fmaddsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { let fmaddsub = _mm512_fmaddsub_pd(a, b, c).as_f64x8(); @@ -3536,6 +3841,7 @@ pub unsafe fn _mm512_mask3_fmaddsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fmaddsub_pd&expand=2596) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd pub unsafe fn _mm256_mask_fmaddsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { let fmaddsub = _mm256_fmaddsub_pd(a, b, c).as_f64x4(); @@ -3547,6 +3853,7 @@ pub unsafe fn _mm256_mask_fmaddsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fmaddsub_pd&expand=2598) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd pub unsafe fn _mm256_maskz_fmaddsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { let fmaddsub = _mm256_fmaddsub_pd(a, b, c).as_f64x4(); @@ -3559,6 +3866,7 @@ pub unsafe fn 
_mm256_maskz_fmaddsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fmaddsub_pd&expand=2597) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd pub unsafe fn _mm256_mask3_fmaddsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { let fmaddsub = _mm256_fmaddsub_pd(a, b, c).as_f64x4(); @@ -3570,6 +3878,7 @@ pub unsafe fn _mm256_mask3_fmaddsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fmaddsub_pd&expand=2592) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd pub unsafe fn _mm_mask_fmaddsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { let fmaddsub = _mm_fmaddsub_pd(a, b, c).as_f64x2(); @@ -3581,6 +3890,7 @@ pub unsafe fn _mm_mask_fmaddsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fmaddsub_pd&expand=2594) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd pub unsafe fn _mm_maskz_fmaddsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { let fmaddsub = _mm_fmaddsub_pd(a, b, c).as_f64x2(); @@ -3593,6 +3903,7 @@ pub unsafe fn _mm_maskz_fmaddsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m1 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fmaddsub_pd&expand=2593) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd pub unsafe fn _mm_mask3_fmaddsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { let fmaddsub = _mm_fmaddsub_pd(a, b, c).as_f64x2(); @@ -3604,6 +3915,7 @@ pub unsafe fn _mm_mask3_fmaddsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mma /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmsubadd_ps&expand=2691) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps pub unsafe fn _mm512_fmsubadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 { let zero: f32x16 = mem::zeroed(); @@ -3621,6 +3933,7 @@ pub unsafe fn _mm512_fmsubadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmsubadd_ps&expand=2692) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps pub unsafe fn _mm512_mask_fmsubadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { let fmsubadd = _mm512_fmsubadd_ps(a, b, c).as_f32x16(); @@ -3632,6 +3945,7 @@ pub unsafe fn _mm512_mask_fmsubadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmsubadd_ps&expand=2694) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps pub unsafe fn _mm512_maskz_fmsubadd_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { let fmsubadd = _mm512_fmsubadd_ps(a, b, c).as_f32x16(); @@ -3644,6 +3958,7 @@ pub unsafe fn _mm512_maskz_fmsubadd_ps(k: __mmask16, a: __m512, b: __m512, c: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmsubadd_ps&expand=2693) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps pub unsafe fn _mm512_mask3_fmsubadd_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { let fmsubadd = _mm512_fmsubadd_ps(a, b, c).as_f32x16(); @@ -3655,6 +3970,7 @@ pub unsafe fn _mm512_mask3_fmsubadd_ps(a: __m512, b: __m512, c: __m512, k: __mma /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fmsubadd_ps&expand=2688) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps pub unsafe fn _mm256_mask_fmsubadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { let fmsubadd = _mm256_fmsubadd_ps(a, b, c).as_f32x8(); @@ -3666,6 +3982,7 @@ pub unsafe fn _mm256_mask_fmsubadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fmsubadd_ps&expand=2690) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps pub unsafe fn _mm256_maskz_fmsubadd_ps(k: 
__mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { let fmsubadd = _mm256_fmsubadd_ps(a, b, c).as_f32x8(); @@ -3678,6 +3995,7 @@ pub unsafe fn _mm256_maskz_fmsubadd_ps(k: __mmask8, a: __m256, b: __m256, c: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fmsubadd_ps&expand=2689) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps pub unsafe fn _mm256_mask3_fmsubadd_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { let fmsubadd = _mm256_fmsubadd_ps(a, b, c).as_f32x8(); @@ -3689,6 +4007,7 @@ pub unsafe fn _mm256_mask3_fmsubadd_ps(a: __m256, b: __m256, c: __m256, k: __mma /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fmsubadd_ps&expand=2684) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps pub unsafe fn _mm_mask_fmsubadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { let fmsubadd = _mm_fmsubadd_ps(a, b, c).as_f32x4(); @@ -3700,6 +4019,7 @@ pub unsafe fn _mm_mask_fmsubadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fmsubadd_ps&expand=2686) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps pub unsafe fn _mm_maskz_fmsubadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { let fmsubadd = _mm_fmsubadd_ps(a, b, c).as_f32x4(); @@ -3712,6 +4032,7 @@ pub unsafe fn _mm_maskz_fmsubadd_ps(k: 
__mmask8, a: __m128, b: __m128, c: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fmsubadd_ps&expand=2685) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps pub unsafe fn _mm_mask3_fmsubadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { let fmsubadd = _mm_fmsubadd_ps(a, b, c).as_f32x4(); @@ -3723,6 +4044,7 @@ pub unsafe fn _mm_mask3_fmsubadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmsubadd_pd&expand=2679) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd pub unsafe fn _mm512_fmsubadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { let zero: f64x8 = mem::zeroed(); @@ -3740,6 +4062,7 @@ pub unsafe fn _mm512_fmsubadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmsubadd_pd&expand=2680) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd pub unsafe fn _mm512_mask_fmsubadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { let fmsubadd = _mm512_fmsubadd_pd(a, b, c).as_f64x8(); @@ -3751,6 +4074,7 @@ pub unsafe fn _mm512_mask_fmsubadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmsubadd_pd&expand=2682) #[inline] #[target_feature(enable = 
"avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd pub unsafe fn _mm512_maskz_fmsubadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { let fmsubadd = _mm512_fmsubadd_pd(a, b, c).as_f64x8(); @@ -3763,6 +4087,7 @@ pub unsafe fn _mm512_maskz_fmsubadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmsubadd_pd&expand=2681) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd pub unsafe fn _mm512_mask3_fmsubadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { let fmsubadd = _mm512_fmsubadd_pd(a, b, c).as_f64x8(); @@ -3774,6 +4099,7 @@ pub unsafe fn _mm512_mask3_fmsubadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fmsubadd_pd&expand=2676) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd pub unsafe fn _mm256_mask_fmsubadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { let fmsubadd = _mm256_fmsubadd_pd(a, b, c).as_f64x4(); @@ -3785,6 +4111,7 @@ pub unsafe fn _mm256_mask_fmsubadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fmsubadd_pd&expand=2678) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd 
or vfmsubadd231pd pub unsafe fn _mm256_maskz_fmsubadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { let fmsubadd = _mm256_fmsubadd_pd(a, b, c).as_f64x4(); @@ -3797,6 +4124,7 @@ pub unsafe fn _mm256_maskz_fmsubadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fmsubadd_pd&expand=2677) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd pub unsafe fn _mm256_mask3_fmsubadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { let fmsubadd = _mm256_fmsubadd_pd(a, b, c).as_f64x4(); @@ -3808,6 +4136,7 @@ pub unsafe fn _mm256_mask3_fmsubadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fmsubadd_pd&expand=2672) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd pub unsafe fn _mm_mask_fmsubadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { let fmsubadd = _mm_fmsubadd_pd(a, b, c).as_f64x2(); @@ -3819,6 +4148,7 @@ pub unsafe fn _mm_mask_fmsubadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fmsubadd_pd&expand=2674) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd pub unsafe fn _mm_maskz_fmsubadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { let fmsubadd = _mm_fmsubadd_pd(a, b, 
c).as_f64x2(); @@ -3831,6 +4161,7 @@ pub unsafe fn _mm_maskz_fmsubadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fmsubadd_pd&expand=2673) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd pub unsafe fn _mm_mask3_fmsubadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { let fmsubadd = _mm_fmsubadd_pd(a, b, c).as_f64x2(); @@ -3842,6 +4173,7 @@ pub unsafe fn _mm_mask3_fmsubadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mma /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fnmadd_ps&expand=2723) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps pub unsafe fn _mm512_fnmadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 { let zero: f32x16 = mem::zeroed(); @@ -3854,6 +4186,7 @@ pub unsafe fn _mm512_fnmadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fnmadd_ps&expand=2724) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps pub unsafe fn _mm512_mask_fnmadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { let fnmadd = _mm512_fnmadd_ps(a, b, c).as_f32x16(); @@ -3865,6 +4198,7 @@ pub unsafe fn _mm512_mask_fnmadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m51 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fnmadd_ps&expand=2726) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps pub unsafe fn _mm512_maskz_fnmadd_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { let fnmadd = _mm512_fnmadd_ps(a, b, c).as_f32x16(); @@ -3877,6 +4211,7 @@ pub unsafe fn _mm512_maskz_fnmadd_ps(k: __mmask16, a: __m512, b: __m512, c: __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fnmadd_ps&expand=2725) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps pub unsafe fn _mm512_mask3_fnmadd_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { let fnmadd = _mm512_fnmadd_ps(a, b, c).as_f32x16(); @@ -3888,6 +4223,7 @@ pub unsafe fn _mm512_mask3_fnmadd_ps(a: __m512, b: __m512, c: __m512, k: __mmask /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fnmadd_ps&expand=2720) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps pub unsafe fn _mm256_mask_fnmadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { let fnmadd = _mm256_fnmadd_ps(a, b, c).as_f32x8(); @@ -3899,6 +4235,7 @@ pub unsafe fn _mm256_mask_fnmadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fnmadd_ps&expand=2722) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps pub unsafe fn _mm256_maskz_fnmadd_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { let fnmadd = _mm256_fnmadd_ps(a, b, c).as_f32x8(); @@ -3911,6 +4248,7 @@ pub unsafe fn _mm256_maskz_fnmadd_ps(k: __mmask8, a: __m256, b: __m256, c: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fnmadd_ps&expand=2721) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps pub unsafe fn _mm256_mask3_fnmadd_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { let fnmadd = _mm256_fnmadd_ps(a, b, c).as_f32x8(); @@ -3922,6 +4260,7 @@ pub unsafe fn _mm256_mask3_fnmadd_ps(a: __m256, b: __m256, c: __m256, k: __mmask /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fnmadd_ps&expand=2716) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps pub unsafe fn _mm_mask_fnmadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { let fnmadd = _mm_fnmadd_ps(a, b, c).as_f32x4(); @@ -3933,6 +4272,7 @@ pub unsafe fn _mm_mask_fnmadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fnmadd_ps&expand=2718) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps pub unsafe fn _mm_maskz_fnmadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> 
__m128 { let fnmadd = _mm_fnmadd_ps(a, b, c).as_f32x4(); @@ -3945,6 +4285,7 @@ pub unsafe fn _mm_maskz_fnmadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fnmadd_ps&expand=2717) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps pub unsafe fn _mm_mask3_fnmadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { let fnmadd = _mm_fnmadd_ps(a, b, c).as_f32x4(); @@ -3956,6 +4297,7 @@ pub unsafe fn _mm_mask3_fnmadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fnmadd_pd&expand=2711) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd pub unsafe fn _mm512_fnmadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { let zero: f64x8 = mem::zeroed(); @@ -3968,6 +4310,7 @@ pub unsafe fn _mm512_fnmadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fnmadd_pd&expand=2712) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd pub unsafe fn _mm512_mask_fnmadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { let fnmadd = _mm512_fnmadd_pd(a, b, c).as_f64x8(); @@ -3979,6 +4322,7 @@ pub unsafe fn _mm512_mask_fnmadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m5 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fnmadd_pd&expand=2714) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd pub unsafe fn _mm512_maskz_fnmadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { let fnmadd = _mm512_fnmadd_pd(a, b, c).as_f64x8(); @@ -3991,6 +4335,7 @@ pub unsafe fn _mm512_maskz_fnmadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fnmadd_pd&expand=2713) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd pub unsafe fn _mm512_mask3_fnmadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { let fnmadd = _mm512_fnmadd_pd(a, b, c).as_f64x8(); @@ -4002,6 +4347,7 @@ pub unsafe fn _mm512_mask3_fnmadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __mm /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fnmadd_pd&expand=2708) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd pub unsafe fn _mm256_mask_fnmadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { let fnmadd = _mm256_fnmadd_pd(a, b, c).as_f64x4(); @@ -4013,6 +4359,7 @@ pub unsafe fn _mm256_mask_fnmadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fnmadd_pd&expand=2710) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd pub unsafe fn _mm256_maskz_fnmadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { let fnmadd = _mm256_fnmadd_pd(a, b, c).as_f64x4(); @@ -4025,6 +4372,7 @@ pub unsafe fn _mm256_maskz_fnmadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fnmadd_pd&expand=2709) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd pub unsafe fn _mm256_mask3_fnmadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { let fnmadd = _mm256_fnmadd_pd(a, b, c).as_f64x4(); @@ -4036,6 +4384,7 @@ pub unsafe fn _mm256_mask3_fnmadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __mm /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fnmadd_pd&expand=2704) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd pub unsafe fn _mm_mask_fnmadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { let fnmadd = _mm_fnmadd_pd(a, b, c).as_f64x2(); @@ -4047,6 +4396,7 @@ pub unsafe fn _mm_mask_fnmadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fnmadd_pd&expand=2706) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd pub unsafe fn _mm_maskz_fnmadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: 
__m128d) -> __m128d { let fnmadd = _mm_fnmadd_pd(a, b, c).as_f64x2(); @@ -4059,6 +4409,7 @@ pub unsafe fn _mm_maskz_fnmadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fnmadd_pd&expand=2705) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd pub unsafe fn _mm_mask3_fnmadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { let fnmadd = _mm_fnmadd_pd(a, b, c).as_f64x2(); @@ -4070,6 +4421,7 @@ pub unsafe fn _mm_mask3_fnmadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fnmsub_ps&expand=2771) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps pub unsafe fn _mm512_fnmsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 { let zero: f32x16 = mem::zeroed(); @@ -4083,6 +4435,7 @@ pub unsafe fn _mm512_fnmsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fnmsub_ps&expand=2772) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps pub unsafe fn _mm512_mask_fnmsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m512) -> __m512 { let fnmsub = _mm512_fnmsub_ps(a, b, c).as_f32x16(); @@ -4094,6 +4447,7 @@ pub unsafe fn _mm512_mask_fnmsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m51 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fnmsub_ps&expand=2774) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps pub unsafe fn _mm512_maskz_fnmsub_ps(k: __mmask16, a: __m512, b: __m512, c: __m512) -> __m512 { let fnmsub = _mm512_fnmsub_ps(a, b, c).as_f32x16(); @@ -4106,6 +4460,7 @@ pub unsafe fn _mm512_maskz_fnmsub_ps(k: __mmask16, a: __m512, b: __m512, c: __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fnmsub_ps&expand=2773) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps pub unsafe fn _mm512_mask3_fnmsub_ps(a: __m512, b: __m512, c: __m512, k: __mmask16) -> __m512 { let fnmsub = _mm512_fnmsub_ps(a, b, c).as_f32x16(); @@ -4117,6 +4472,7 @@ pub unsafe fn _mm512_mask3_fnmsub_ps(a: __m512, b: __m512, c: __m512, k: __mmask /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fnmsub_ps&expand=2768) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps pub unsafe fn _mm256_mask_fnmsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m256) -> __m256 { let fnmsub = _mm256_fnmsub_ps(a, b, c).as_f32x8(); @@ -4128,6 +4484,7 @@ pub unsafe fn _mm256_mask_fnmsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fnmsub_ps&expand=2770) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps pub unsafe fn _mm256_maskz_fnmsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m256) -> __m256 { let fnmsub = _mm256_fnmsub_ps(a, b, c).as_f32x8(); @@ -4140,6 +4497,7 @@ pub unsafe fn _mm256_maskz_fnmsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fnmsub_ps&expand=2769) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps pub unsafe fn _mm256_mask3_fnmsub_ps(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256 { let fnmsub = _mm256_fnmsub_ps(a, b, c).as_f32x8(); @@ -4151,6 +4509,7 @@ pub unsafe fn _mm256_mask3_fnmsub_ps(a: __m256, b: __m256, c: __m256, k: __mmask /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fnmsub_ps&expand=2764) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps pub unsafe fn _mm_mask_fnmsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { let fnmsub = _mm_fnmsub_ps(a, b, c).as_f32x4(); @@ -4162,6 +4521,7 @@ pub unsafe fn _mm_mask_fnmsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fnmsub_ps&expand=2766) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps pub unsafe fn _mm_maskz_fnmsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -> 
__m128 { let fnmsub = _mm_fnmsub_ps(a, b, c).as_f32x4(); @@ -4174,6 +4534,7 @@ pub unsafe fn _mm_maskz_fnmsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fnmsub_ps&expand=2765) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps pub unsafe fn _mm_mask3_fnmsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { let fnmsub = _mm_fnmsub_ps(a, b, c).as_f32x4(); @@ -4185,6 +4546,7 @@ pub unsafe fn _mm_mask3_fnmsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fnmsub_pd&expand=2759) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd pub unsafe fn _mm512_fnmsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { let zero: f64x8 = mem::zeroed(); @@ -4198,6 +4560,7 @@ pub unsafe fn _mm512_fnmsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fnmsub_pd&expand=2760) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd pub unsafe fn _mm512_mask_fnmsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d { let fnmsub = _mm512_fnmsub_pd(a, b, c).as_f64x8(); @@ -4209,6 +4572,7 @@ pub unsafe fn _mm512_mask_fnmsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m5 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fnmsub_pd&expand=2762) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd pub unsafe fn _mm512_maskz_fnmsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m512d) -> __m512d { let fnmsub = _mm512_fnmsub_pd(a, b, c).as_f64x8(); @@ -4221,6 +4585,7 @@ pub unsafe fn _mm512_maskz_fnmsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fnmsub_pd&expand=2761) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd pub unsafe fn _mm512_mask3_fnmsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d { let fnmsub = _mm512_fnmsub_pd(a, b, c).as_f64x8(); @@ -4232,6 +4597,7 @@ pub unsafe fn _mm512_mask3_fnmsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __mm /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fnmsub_pd&expand=2756) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd pub unsafe fn _mm256_mask_fnmsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m256d) -> __m256d { let fnmsub = _mm256_fnmsub_pd(a, b, c).as_f64x4(); @@ -4243,6 +4609,7 @@ pub unsafe fn _mm256_mask_fnmsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fnmsub_pd&expand=2758) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd pub unsafe fn _mm256_maskz_fnmsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m256d) -> __m256d { let fnmsub = _mm256_fnmsub_pd(a, b, c).as_f64x4(); @@ -4255,6 +4622,7 @@ pub unsafe fn _mm256_maskz_fnmsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fnmsub_pd&expand=2757) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd pub unsafe fn _mm256_mask3_fnmsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __mmask8) -> __m256d { let fnmsub = _mm256_fnmsub_pd(a, b, c).as_f64x4(); @@ -4266,6 +4634,7 @@ pub unsafe fn _mm256_mask3_fnmsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __mm /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fnmsub_pd&expand=2752) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd pub unsafe fn _mm_mask_fnmsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { let fnmsub = _mm_fnmsub_pd(a, b, c).as_f64x2(); @@ -4277,6 +4646,7 @@ pub unsafe fn _mm_mask_fnmsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fnmsub_pd&expand=2754) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd pub unsafe fn _mm_maskz_fnmsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: 
__m128d) -> __m128d { let fnmsub = _mm_fnmsub_pd(a, b, c).as_f64x2(); @@ -4289,6 +4659,7 @@ pub unsafe fn _mm_maskz_fnmsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fnmsub_pd&expand=2753) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd pub unsafe fn _mm_mask3_fnmsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { let fnmsub = _mm_fnmsub_pd(a, b, c).as_f64x2(); @@ -4300,6 +4671,7 @@ pub unsafe fn _mm_mask3_fnmsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rcp14_ps&expand=4502) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14ps))] pub unsafe fn _mm512_rcp14_ps(a: __m512) -> __m512 { transmute(vrcp14ps( @@ -4314,6 +4686,7 @@ pub unsafe fn _mm512_rcp14_ps(a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rcp14_ps&expand=4500) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14ps))] pub unsafe fn _mm512_mask_rcp14_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { transmute(vrcp14ps(a.as_f32x16(), src.as_f32x16(), k)) @@ -4324,6 +4697,7 @@ pub unsafe fn _mm512_mask_rcp14_ps(src: __m512, k: __mmask16, a: __m512) -> __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rcp14_ps&expand=4501) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vrcp14ps))] pub unsafe fn _mm512_maskz_rcp14_ps(k: __mmask16, a: __m512) -> __m512 { transmute(vrcp14ps(a.as_f32x16(), _mm512_setzero_ps().as_f32x16(), k)) @@ -4334,6 +4708,7 @@ pub unsafe fn _mm512_maskz_rcp14_ps(k: __mmask16, a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rcp14_ps&expand=4499) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14ps))] pub unsafe fn _mm256_rcp14_ps(a: __m256) -> __m256 { transmute(vrcp14ps256( @@ -4348,6 +4723,7 @@ pub unsafe fn _mm256_rcp14_ps(a: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rcp14_ps&expand=4497) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14ps))] pub unsafe fn _mm256_mask_rcp14_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { transmute(vrcp14ps256(a.as_f32x8(), src.as_f32x8(), k)) @@ -4358,6 +4734,7 @@ pub unsafe fn _mm256_mask_rcp14_ps(src: __m256, k: __mmask8, a: __m256) -> __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rcp14_ps&expand=4498) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14ps))] pub unsafe fn _mm256_maskz_rcp14_ps(k: __mmask8, a: __m256) -> __m256 { transmute(vrcp14ps256(a.as_f32x8(), _mm256_setzero_ps().as_f32x8(), k)) @@ -4368,6 +4745,7 @@ pub unsafe fn _mm256_maskz_rcp14_ps(k: __mmask8, a: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rcp14_ps&expand=4496) #[inline] #[target_feature(enable = 
"avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14ps))] pub unsafe fn _mm_rcp14_ps(a: __m128) -> __m128 { transmute(vrcp14ps128( @@ -4382,6 +4760,7 @@ pub unsafe fn _mm_rcp14_ps(a: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rcp14_ps&expand=4494) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14ps))] pub unsafe fn _mm_mask_rcp14_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { transmute(vrcp14ps128(a.as_f32x4(), src.as_f32x4(), k)) @@ -4392,6 +4771,7 @@ pub unsafe fn _mm_mask_rcp14_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rcp14_ps&expand=4495) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14ps))] pub unsafe fn _mm_maskz_rcp14_ps(k: __mmask8, a: __m128) -> __m128 { transmute(vrcp14ps128(a.as_f32x4(), _mm_setzero_ps().as_f32x4(), k)) @@ -4402,6 +4782,7 @@ pub unsafe fn _mm_maskz_rcp14_ps(k: __mmask8, a: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rcp14_pd&expand=4493) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14pd))] pub unsafe fn _mm512_rcp14_pd(a: __m512d) -> __m512d { transmute(vrcp14pd( @@ -4416,6 +4797,7 @@ pub unsafe fn _mm512_rcp14_pd(a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rcp14_pd&expand=4491) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14pd))] pub unsafe fn _mm512_mask_rcp14_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { transmute(vrcp14pd(a.as_f64x8(), src.as_f64x8(), k)) @@ -4426,6 +4808,7 @@ pub unsafe fn _mm512_mask_rcp14_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rcp14_pd&expand=4492) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14pd))] pub unsafe fn _mm512_maskz_rcp14_pd(k: __mmask8, a: __m512d) -> __m512d { transmute(vrcp14pd(a.as_f64x8(), _mm512_setzero_pd().as_f64x8(), k)) @@ -4436,6 +4819,7 @@ pub unsafe fn _mm512_maskz_rcp14_pd(k: __mmask8, a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rcp14_pd&expand=4490) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14pd))] pub unsafe fn _mm256_rcp14_pd(a: __m256d) -> __m256d { transmute(vrcp14pd256( @@ -4450,6 +4834,7 @@ pub unsafe fn _mm256_rcp14_pd(a: __m256d) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rcp14_pd&expand=4488) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14pd))] pub unsafe fn _mm256_mask_rcp14_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { transmute(vrcp14pd256(a.as_f64x4(), src.as_f64x4(), k)) @@ -4460,6 +4845,7 @@ pub unsafe fn _mm256_mask_rcp14_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rcp14_pd&expand=4489) 
#[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14pd))] pub unsafe fn _mm256_maskz_rcp14_pd(k: __mmask8, a: __m256d) -> __m256d { transmute(vrcp14pd256(a.as_f64x4(), _mm256_setzero_pd().as_f64x4(), k)) @@ -4470,6 +4856,7 @@ pub unsafe fn _mm256_maskz_rcp14_pd(k: __mmask8, a: __m256d) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rcp14_pd&expand=4487) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14pd))] pub unsafe fn _mm_rcp14_pd(a: __m128d) -> __m128d { transmute(vrcp14pd128( @@ -4484,6 +4871,7 @@ pub unsafe fn _mm_rcp14_pd(a: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rcp14_pd&expand=4485) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14pd))] pub unsafe fn _mm_mask_rcp14_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d { transmute(vrcp14pd128(a.as_f64x2(), src.as_f64x2(), k)) @@ -4494,6 +4882,7 @@ pub unsafe fn _mm_mask_rcp14_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rcp14_pd&expand=4486) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14pd))] pub unsafe fn _mm_maskz_rcp14_pd(k: __mmask8, a: __m128d) -> __m128d { transmute(vrcp14pd128(a.as_f64x2(), _mm_setzero_pd().as_f64x2(), k)) @@ -4504,6 +4893,7 @@ pub unsafe fn _mm_maskz_rcp14_pd(k: __mmask8, a: __m128d) -> __m128d { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rsqrt14_ps&expand=4819) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14ps))] pub unsafe fn _mm512_rsqrt14_ps(a: __m512) -> __m512 { transmute(vrsqrt14ps( @@ -4518,6 +4908,7 @@ pub unsafe fn _mm512_rsqrt14_ps(a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rsqrt14_ps&expand=4817) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14ps))] pub unsafe fn _mm512_mask_rsqrt14_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { transmute(vrsqrt14ps(a.as_f32x16(), src.as_f32x16(), k)) @@ -4528,6 +4919,7 @@ pub unsafe fn _mm512_mask_rsqrt14_ps(src: __m512, k: __mmask16, a: __m512) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rsqrt14_ps&expand=4818) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14ps))] pub unsafe fn _mm512_maskz_rsqrt14_ps(k: __mmask16, a: __m512) -> __m512 { transmute(vrsqrt14ps( @@ -4542,6 +4934,7 @@ pub unsafe fn _mm512_maskz_rsqrt14_ps(k: __mmask16, a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rsqrt14_ps&expand=4815) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14ps))] pub unsafe fn _mm256_mask_rsqrt14_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { transmute(vrsqrt14ps256(a.as_f32x8(), src.as_f32x8(), k)) @@ -4552,6 +4945,7 @@ pub unsafe fn _mm256_mask_rsqrt14_ps(src: __m256, k: 
__mmask8, a: __m256) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rsqrt14_ps&expand=4816) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14ps))] pub unsafe fn _mm256_maskz_rsqrt14_ps(k: __mmask8, a: __m256) -> __m256 { transmute(vrsqrt14ps256( @@ -4566,6 +4960,7 @@ pub unsafe fn _mm256_maskz_rsqrt14_ps(k: __mmask8, a: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rsqrt14_ps&expand=4813) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14ps))] pub unsafe fn _mm_mask_rsqrt14_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { transmute(vrsqrt14ps128(a.as_f32x4(), src.as_f32x4(), k)) @@ -4576,6 +4971,7 @@ pub unsafe fn _mm_mask_rsqrt14_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rsqrt14_ps&expand=4814) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14ps))] pub unsafe fn _mm_maskz_rsqrt14_ps(k: __mmask8, a: __m128) -> __m128 { transmute(vrsqrt14ps128(a.as_f32x4(), _mm_setzero_ps().as_f32x4(), k)) @@ -4586,6 +4982,7 @@ pub unsafe fn _mm_maskz_rsqrt14_ps(k: __mmask8, a: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rsqrt14_pd&expand=4812) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14pd))] pub unsafe fn _mm512_rsqrt14_pd(a: __m512d) -> __m512d { transmute(vrsqrt14pd( @@ -4600,6 
+4997,7 @@ pub unsafe fn _mm512_rsqrt14_pd(a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rsqrt14_pd&expand=4810) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14pd))] pub unsafe fn _mm512_mask_rsqrt14_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { transmute(vrsqrt14pd(a.as_f64x8(), src.as_f64x8(), k)) @@ -4610,6 +5008,7 @@ pub unsafe fn _mm512_mask_rsqrt14_pd(src: __m512d, k: __mmask8, a: __m512d) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rsqrt14_pd&expand=4811) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14pd))] pub unsafe fn _mm512_maskz_rsqrt14_pd(k: __mmask8, a: __m512d) -> __m512d { transmute(vrsqrt14pd(a.as_f64x8(), _mm512_setzero_pd().as_f64x8(), k)) @@ -4620,6 +5019,7 @@ pub unsafe fn _mm512_maskz_rsqrt14_pd(k: __mmask8, a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rsqrt14_pd&expand=4808) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14pd))] pub unsafe fn _mm256_mask_rsqrt14_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { transmute(vrsqrt14pd256(a.as_f64x4(), src.as_f64x4(), k)) @@ -4630,6 +5030,7 @@ pub unsafe fn _mm256_mask_rsqrt14_pd(src: __m256d, k: __mmask8, a: __m256d) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rsqrt14_pd&expand=4809) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vrsqrt14pd))] pub unsafe fn _mm256_maskz_rsqrt14_pd(k: __mmask8, a: __m256d) -> __m256d { transmute(vrsqrt14pd256( @@ -4644,6 +5045,7 @@ pub unsafe fn _mm256_maskz_rsqrt14_pd(k: __mmask8, a: __m256d) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rsqrt14_pd&expand=4806) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14pd))] pub unsafe fn _mm_mask_rsqrt14_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d { transmute(vrsqrt14pd128(a.as_f64x2(), src.as_f64x2(), k)) @@ -4654,6 +5056,7 @@ pub unsafe fn _mm_mask_rsqrt14_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rsqrt14_pd&expand=4807) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14pd))] pub unsafe fn _mm_maskz_rsqrt14_pd(k: __mmask8, a: __m128d) -> __m128d { transmute(vrsqrt14pd128(a.as_f64x2(), _mm_setzero_pd().as_f64x2(), k)) @@ -4664,6 +5067,7 @@ pub unsafe fn _mm_maskz_rsqrt14_pd(k: __mmask8, a: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_getexp_ps&expand=2844) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpps))] pub unsafe fn _mm512_getexp_ps(a: __m512) -> __m512 { transmute(vgetexpps( @@ -4679,6 +5083,7 @@ pub unsafe fn _mm512_getexp_ps(a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_getexp_ps&expand=2845) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue 
= "111137")] #[cfg_attr(test, assert_instr(vgetexpps))] pub unsafe fn _mm512_mask_getexp_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { transmute(vgetexpps( @@ -4694,6 +5099,7 @@ pub unsafe fn _mm512_mask_getexp_ps(src: __m512, k: __mmask16, a: __m512) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_getexp_ps&expand=2846) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpps))] pub unsafe fn _mm512_maskz_getexp_ps(k: __mmask16, a: __m512) -> __m512 { transmute(vgetexpps( @@ -4709,6 +5115,7 @@ pub unsafe fn _mm512_maskz_getexp_ps(k: __mmask16, a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_getexp_ps&expand=2841) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpps))] pub unsafe fn _mm256_getexp_ps(a: __m256) -> __m256 { transmute(vgetexpps256( @@ -4723,6 +5130,7 @@ pub unsafe fn _mm256_getexp_ps(a: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_getexp_ps&expand=2842) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpps))] pub unsafe fn _mm256_mask_getexp_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { transmute(vgetexpps256(a.as_f32x8(), src.as_f32x8(), k)) @@ -4733,6 +5141,7 @@ pub unsafe fn _mm256_mask_getexp_ps(src: __m256, k: __mmask8, a: __m256) -> __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_getexp_ps&expand=2843) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpps))] pub unsafe fn _mm256_maskz_getexp_ps(k: __mmask8, a: __m256) -> __m256 { transmute(vgetexpps256( @@ -4747,6 +5156,7 @@ pub unsafe fn _mm256_maskz_getexp_ps(k: __mmask8, a: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_getexp_ps&expand=2838) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpps))] pub unsafe fn _mm_getexp_ps(a: __m128) -> __m128 { transmute(vgetexpps128( @@ -4761,6 +5171,7 @@ pub unsafe fn _mm_getexp_ps(a: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_getexp_ps&expand=2839) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpps))] pub unsafe fn _mm_mask_getexp_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { transmute(vgetexpps128(a.as_f32x4(), src.as_f32x4(), k)) @@ -4771,6 +5182,7 @@ pub unsafe fn _mm_mask_getexp_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_getexp_ps&expand=2840) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpps))] pub unsafe fn _mm_maskz_getexp_ps(k: __mmask8, a: __m128) -> __m128 { transmute(vgetexpps128(a.as_f32x4(), _mm_setzero_ps().as_f32x4(), k)) @@ -4781,6 +5193,7 @@ pub unsafe fn _mm_maskz_getexp_ps(k: __mmask8, a: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_getexp_pd&expand=2835) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexppd))] pub unsafe fn _mm512_getexp_pd(a: __m512d) -> __m512d { transmute(vgetexppd( @@ -4796,6 +5209,7 @@ pub unsafe fn _mm512_getexp_pd(a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_getexp_pd&expand=2836) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexppd))] pub unsafe fn _mm512_mask_getexp_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { transmute(vgetexppd( @@ -4811,6 +5225,7 @@ pub unsafe fn _mm512_mask_getexp_pd(src: __m512d, k: __mmask8, a: __m512d) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_getexp_pd&expand=2837) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexppd))] pub unsafe fn _mm512_maskz_getexp_pd(k: __mmask8, a: __m512d) -> __m512d { transmute(vgetexppd( @@ -4826,6 +5241,7 @@ pub unsafe fn _mm512_maskz_getexp_pd(k: __mmask8, a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_getexp_pd&expand=2832) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexppd))] pub unsafe fn _mm256_getexp_pd(a: __m256d) -> __m256d { transmute(vgetexppd256( @@ -4840,6 +5256,7 @@ pub unsafe fn _mm256_getexp_pd(a: __m256d) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_getexp_pd&expand=2833) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexppd))] pub 
unsafe fn _mm256_mask_getexp_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { transmute(vgetexppd256(a.as_f64x4(), src.as_f64x4(), k)) @@ -4850,6 +5267,7 @@ pub unsafe fn _mm256_mask_getexp_pd(src: __m256d, k: __mmask8, a: __m256d) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_getexp_pd&expand=2834) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexppd))] pub unsafe fn _mm256_maskz_getexp_pd(k: __mmask8, a: __m256d) -> __m256d { transmute(vgetexppd256( @@ -4864,6 +5282,7 @@ pub unsafe fn _mm256_maskz_getexp_pd(k: __mmask8, a: __m256d) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_getexp_pd&expand=2829) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexppd))] pub unsafe fn _mm_getexp_pd(a: __m128d) -> __m128d { transmute(vgetexppd128( @@ -4878,6 +5297,7 @@ pub unsafe fn _mm_getexp_pd(a: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_getexp_pd&expand=2830) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexppd))] pub unsafe fn _mm_mask_getexp_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d { transmute(vgetexppd128(a.as_f64x2(), src.as_f64x2(), k)) @@ -4888,6 +5308,7 @@ pub unsafe fn _mm_mask_getexp_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_getexp_pd&expand=2831) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vgetexppd))] pub unsafe fn _mm_maskz_getexp_pd(k: __mmask8, a: __m128d) -> __m128d { transmute(vgetexppd128(a.as_f64x2(), _mm_setzero_pd().as_f64x2(), k)) @@ -4904,6 +5325,7 @@ pub unsafe fn _mm_maskz_getexp_pd(k: __mmask8, a: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_roundscale_ps&expand=4784) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_roundscale_ps(a: __m512) -> __m512 { @@ -4925,6 +5347,7 @@ pub unsafe fn _mm512_roundscale_ps(a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_roundscale_ps&expand=4782) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_roundscale_ps( @@ -4950,6 +5373,7 @@ pub unsafe fn _mm512_mask_roundscale_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_roundscale_ps&expand=4783) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_roundscale_ps(k: __mmask16, a: __m512) -> __m512 { @@ -4971,6 +5395,7 @@ pub unsafe fn _mm512_maskz_roundscale_ps(k: __mmask16, a: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_roundscale_ps&expand=4781) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vrndscaleps, IMM8 = 250))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_roundscale_ps(a: __m256) -> __m256 { @@ -4992,6 +5417,7 @@ pub unsafe fn _mm256_roundscale_ps(a: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_roundscale_ps&expand=4779) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_roundscale_ps( @@ -5017,6 +5443,7 @@ pub unsafe fn _mm256_mask_roundscale_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_roundscale_ps&expand=4780) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_roundscale_ps(k: __mmask8, a: __m256) -> __m256 { @@ -5038,6 +5465,7 @@ pub unsafe fn _mm256_maskz_roundscale_ps(k: __mmask8, a: __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_roundscale_ps&expand=4778) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 250))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_roundscale_ps(a: __m128) -> __m128 { @@ -5059,6 +5487,7 @@ pub unsafe fn _mm_roundscale_ps(a: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_roundscale_ps&expand=4776) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] 
#[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_roundscale_ps( @@ -5084,6 +5513,7 @@ pub unsafe fn _mm_mask_roundscale_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_roundscale_ps&expand=4777) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_roundscale_ps(k: __mmask8, a: __m128) -> __m128 { @@ -5105,6 +5535,7 @@ pub unsafe fn _mm_maskz_roundscale_ps(k: __mmask8, a: __m128) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_roundscale_pd&expand=4775) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_roundscale_pd(a: __m512d) -> __m512d { @@ -5126,6 +5557,7 @@ pub unsafe fn _mm512_roundscale_pd(a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_roundscale_pd&expand=4773) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_roundscale_pd( @@ -5151,6 +5583,7 @@ pub unsafe fn _mm512_mask_roundscale_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_roundscale_pd&expand=4774) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_roundscale_pd(k: __mmask8, a: __m512d) -> __m512d { 
@@ -5172,6 +5605,7 @@ pub unsafe fn _mm512_maskz_roundscale_pd(k: __mmask8, a: __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_roundscale_pd&expand=4772) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_roundscale_pd(a: __m256d) -> __m256d { @@ -5193,6 +5627,7 @@ pub unsafe fn _mm256_roundscale_pd(a: __m256d) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_roundscale_pd&expand=4770) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_roundscale_pd( @@ -5218,6 +5653,7 @@ pub unsafe fn _mm256_mask_roundscale_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_roundscale_pd&expand=4771) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_roundscale_pd(k: __mmask8, a: __m256d) -> __m256d { @@ -5239,6 +5675,7 @@ pub unsafe fn _mm256_maskz_roundscale_pd(k: __mmask8, a: __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_roundscale_pd&expand=4769) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_roundscale_pd(a: __m128d) -> __m128d { @@ -5260,6 +5697,7 @@ pub unsafe fn 
_mm_roundscale_pd(a: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_roundscale_pd&expand=4767) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_roundscale_pd( @@ -5285,6 +5723,7 @@ pub unsafe fn _mm_mask_roundscale_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_roundscale_pd&expand=4768) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_roundscale_pd(k: __mmask8, a: __m128d) -> __m128d { @@ -5300,6 +5739,7 @@ pub unsafe fn _mm_maskz_roundscale_pd(k: __mmask8, a: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_scalef_ps&expand=4883) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefps))] pub unsafe fn _mm512_scalef_ps(a: __m512, b: __m512) -> __m512 { transmute(vscalefps( @@ -5316,6 +5756,7 @@ pub unsafe fn _mm512_scalef_ps(a: __m512, b: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_scalef_ps&expand=4881) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefps))] pub unsafe fn _mm512_mask_scalef_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { transmute(vscalefps( @@ -5332,6 +5773,7 @@ pub unsafe fn _mm512_mask_scalef_ps(src: __m512, k: __mmask16, a: __m512, b: __m /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_scalef_ps&expand=4882) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefps))] pub unsafe fn _mm512_maskz_scalef_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { transmute(vscalefps( @@ -5348,6 +5790,7 @@ pub unsafe fn _mm512_maskz_scalef_ps(k: __mmask16, a: __m512, b: __m512) -> __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_scalef_ps&expand=4880) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefps))] pub unsafe fn _mm256_scalef_ps(a: __m256, b: __m256) -> __m256 { transmute(vscalefps256( @@ -5363,6 +5806,7 @@ pub unsafe fn _mm256_scalef_ps(a: __m256, b: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_scalef_ps&expand=4878) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefps))] pub unsafe fn _mm256_mask_scalef_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { transmute(vscalefps256(a.as_f32x8(), b.as_f32x8(), src.as_f32x8(), k)) @@ -5373,6 +5817,7 @@ pub unsafe fn _mm256_mask_scalef_ps(src: __m256, k: __mmask8, a: __m256, b: __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_scalef_ps&expand=4879) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefps))] pub unsafe fn _mm256_maskz_scalef_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { transmute(vscalefps256( @@ -5388,6 +5833,7 @@ pub unsafe fn 
_mm256_maskz_scalef_ps(k: __mmask8, a: __m256, b: __m256) -> __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_scalef_ps&expand=4877) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefps))] pub unsafe fn _mm_scalef_ps(a: __m128, b: __m128) -> __m128 { transmute(vscalefps128( @@ -5403,6 +5849,7 @@ pub unsafe fn _mm_scalef_ps(a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_scalef_ps&expand=4875) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefps))] pub unsafe fn _mm_mask_scalef_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { transmute(vscalefps128(a.as_f32x4(), b.as_f32x4(), src.as_f32x4(), k)) @@ -5413,6 +5860,7 @@ pub unsafe fn _mm_mask_scalef_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_scalef_ps&expand=4876) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefps))] pub unsafe fn _mm_maskz_scalef_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { transmute(vscalefps128( @@ -5428,6 +5876,7 @@ pub unsafe fn _mm_maskz_scalef_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_scalef_pd&expand=4874) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefpd))] pub unsafe fn _mm512_scalef_pd(a: __m512d, b: __m512d) -> __m512d { transmute(vscalefpd( @@ 
-5444,6 +5893,7 @@ pub unsafe fn _mm512_scalef_pd(a: __m512d, b: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_scalef_pd&expand=4872) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefpd))] pub unsafe fn _mm512_mask_scalef_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m512d) -> __m512d { transmute(vscalefpd( @@ -5460,6 +5910,7 @@ pub unsafe fn _mm512_mask_scalef_pd(src: __m512d, k: __mmask8, a: __m512d, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_scalef_pd&expand=4873) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefpd))] pub unsafe fn _mm512_maskz_scalef_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { transmute(vscalefpd( @@ -5476,6 +5927,7 @@ pub unsafe fn _mm512_maskz_scalef_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_scalef_pd&expand=4871) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefpd))] pub unsafe fn _mm256_scalef_pd(a: __m256d, b: __m256d) -> __m256d { transmute(vscalefpd256( @@ -5491,6 +5943,7 @@ pub unsafe fn _mm256_scalef_pd(a: __m256d, b: __m256d) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_scalef_pd&expand=4869) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefpd))] pub unsafe fn _mm256_mask_scalef_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> __m256d { 
transmute(vscalefpd256(a.as_f64x4(), b.as_f64x4(), src.as_f64x4(), k)) @@ -5501,6 +5954,7 @@ pub unsafe fn _mm256_mask_scalef_pd(src: __m256d, k: __mmask8, a: __m256d, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_scalef_pd&expand=4870) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefpd))] pub unsafe fn _mm256_maskz_scalef_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { transmute(vscalefpd256( @@ -5516,6 +5970,7 @@ pub unsafe fn _mm256_maskz_scalef_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_scalef_pd&expand=4868) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefpd))] pub unsafe fn _mm_scalef_pd(a: __m128d, b: __m128d) -> __m128d { transmute(vscalefpd128( @@ -5531,6 +5986,7 @@ pub unsafe fn _mm_scalef_pd(a: __m128d, b: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_scalef_pd&expand=4866) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefpd))] pub unsafe fn _mm_mask_scalef_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { transmute(vscalefpd128(a.as_f64x2(), b.as_f64x2(), src.as_f64x2(), k)) @@ -5541,6 +5997,7 @@ pub unsafe fn _mm_mask_scalef_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_scalef_pd&expand=4867) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vscalefpd))] pub unsafe fn _mm_maskz_scalef_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { transmute(vscalefpd128( @@ -5556,6 +6013,7 @@ pub unsafe fn _mm_maskz_scalef_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fixupimm_ps&expand=2499) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_fixupimm_ps(a: __m512, b: __m512, c: __m512i) -> __m512 { @@ -5572,6 +6030,7 @@ pub unsafe fn _mm512_fixupimm_ps(a: __m512, b: __m512, c: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fixupimm_ps&expand=2500) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_fixupimm_ps( @@ -5593,6 +6052,7 @@ pub unsafe fn _mm512_mask_fixupimm_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fixupimm_ps&expand=2501) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_maskz_fixupimm_ps( @@ -5614,6 +6074,7 @@ pub unsafe fn _mm512_maskz_fixupimm_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_fixupimm_ps&expand=2496) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub 
unsafe fn _mm256_fixupimm_ps(a: __m256, b: __m256, c: __m256i) -> __m256 { @@ -5630,6 +6091,7 @@ pub unsafe fn _mm256_fixupimm_ps(a: __m256, b: __m256, c: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fixupimm_ps&expand=2497) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_fixupimm_ps( @@ -5651,6 +6113,7 @@ pub unsafe fn _mm256_mask_fixupimm_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fixupimm_ps&expand=2498) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_maskz_fixupimm_ps( @@ -5672,6 +6135,7 @@ pub unsafe fn _mm256_maskz_fixupimm_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fixupimm_ps&expand=2493) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_fixupimm_ps(a: __m128, b: __m128, c: __m128i) -> __m128 { @@ -5688,6 +6152,7 @@ pub unsafe fn _mm_fixupimm_ps(a: __m128, b: __m128, c: __m128i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fixupimm_ps&expand=2494) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_fixupimm_ps( @@ -5709,6 +6174,7 @@ pub unsafe fn 
_mm_mask_fixupimm_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fixupimm_ps&expand=2495) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_maskz_fixupimm_ps( @@ -5730,6 +6196,7 @@ pub unsafe fn _mm_maskz_fixupimm_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fixupimm_pd&expand=2490) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_fixupimm_pd(a: __m512d, b: __m512d, c: __m512i) -> __m512d { @@ -5746,6 +6213,7 @@ pub unsafe fn _mm512_fixupimm_pd(a: __m512d, b: __m512d, c: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fixupimm_pd&expand=2491) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_fixupimm_pd( @@ -5767,6 +6235,7 @@ pub unsafe fn _mm512_mask_fixupimm_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fixupimm_pd&expand=2492) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_maskz_fixupimm_pd( @@ -5788,6 +6257,7 @@ pub unsafe fn _mm512_maskz_fixupimm_pd( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_fixupimm_pd&expand=2487) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_fixupimm_pd(a: __m256d, b: __m256d, c: __m256i) -> __m256d { @@ -5804,6 +6274,7 @@ pub unsafe fn _mm256_fixupimm_pd(a: __m256d, b: __m256d, c: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fixupimm_pd&expand=2488) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_fixupimm_pd( @@ -5825,6 +6296,7 @@ pub unsafe fn _mm256_mask_fixupimm_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fixupimm_pd&expand=2489) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_maskz_fixupimm_pd( @@ -5846,6 +6318,7 @@ pub unsafe fn _mm256_maskz_fixupimm_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fixupimm_pd&expand=2484) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_fixupimm_pd(a: __m128d, b: __m128d, c: __m128i) -> __m128d { @@ -5862,6 +6335,7 @@ pub unsafe fn _mm_fixupimm_pd(a: __m128d, b: __m128d, c: __m128 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fixupimm_pd&expand=2485) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_fixupimm_pd( @@ -5883,6 +6357,7 @@ pub unsafe fn _mm_mask_fixupimm_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fixupimm_pd&expand=2486) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_maskz_fixupimm_pd( @@ -5904,6 +6379,7 @@ pub unsafe fn _mm_maskz_fixupimm_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_ternarylogic_epi32&expand=5867) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_ternarylogic_epi32( @@ -5924,6 +6400,7 @@ pub unsafe fn _mm512_ternarylogic_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_ternarylogic_epi32&expand=5865) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_ternarylogic_epi32( @@ -5945,6 +6422,7 @@ pub unsafe fn _mm512_mask_ternarylogic_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_ternarylogic_epi32&expand=5866) #[inline] #[target_feature(enable = "avx512f")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_maskz_ternarylogic_epi32( @@ -5967,6 +6445,7 @@ pub unsafe fn _mm512_maskz_ternarylogic_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_ternarylogic_epi32&expand=5864) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_ternarylogic_epi32( @@ -5987,6 +6466,7 @@ pub unsafe fn _mm256_ternarylogic_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_ternarylogic_epi32&expand=5862) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_ternarylogic_epi32( @@ -6008,6 +6488,7 @@ pub unsafe fn _mm256_mask_ternarylogic_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_ternarylogic_epi32&expand=5863) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_maskz_ternarylogic_epi32( @@ -6030,6 +6511,7 @@ pub unsafe fn _mm256_maskz_ternarylogic_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ternarylogic_epi32&expand=5861) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] 
#[rustc_legacy_const_generics(3)] pub unsafe fn _mm_ternarylogic_epi32( @@ -6050,6 +6532,7 @@ pub unsafe fn _mm_ternarylogic_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_ternarylogic_epi32&expand=5859) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_ternarylogic_epi32( @@ -6071,6 +6554,7 @@ pub unsafe fn _mm_mask_ternarylogic_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_ternarylogic_epi32&expand=5860) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_maskz_ternarylogic_epi32( @@ -6093,6 +6577,7 @@ pub unsafe fn _mm_maskz_ternarylogic_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_ternarylogic_epi64&expand=5876) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_ternarylogic_epi64( @@ -6113,6 +6598,7 @@ pub unsafe fn _mm512_ternarylogic_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_ternarylogic_epi64&expand=5874) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_ternarylogic_epi64( @@ -6134,6 +6620,7 @@ pub unsafe fn _mm512_mask_ternarylogic_epi64( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_ternarylogic_epi64&expand=5875) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_maskz_ternarylogic_epi64( @@ -6156,6 +6643,7 @@ pub unsafe fn _mm512_maskz_ternarylogic_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_ternarylogic_epi64&expand=5873) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_ternarylogic_epi64( @@ -6176,6 +6664,7 @@ pub unsafe fn _mm256_ternarylogic_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_ternarylogic_epi64&expand=5871) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_ternarylogic_epi64( @@ -6197,6 +6686,7 @@ pub unsafe fn _mm256_mask_ternarylogic_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_ternarylogic_epi64&expand=5872) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_maskz_ternarylogic_epi64( @@ -6219,6 +6709,7 @@ pub unsafe fn _mm256_maskz_ternarylogic_epi64( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ternarylogic_epi64&expand=5870) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_ternarylogic_epi64( @@ -6239,6 +6730,7 @@ pub unsafe fn _mm_ternarylogic_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_ternarylogic_epi64&expand=5868) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_ternarylogic_epi64( @@ -6260,6 +6752,7 @@ pub unsafe fn _mm_mask_ternarylogic_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_ternarylogic_epi64&expand=5869) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_maskz_ternarylogic_epi64( @@ -6291,6 +6784,7 @@ pub unsafe fn _mm_maskz_ternarylogic_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_getmant_ps&expand=2880) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(1, 2)] pub unsafe fn _mm512_getmant_ps< @@ -6327,6 +6821,7 @@ pub unsafe fn _mm512_getmant_ps< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_getmant_ps&expand=2881) #[inline] #[target_feature(enable = "avx512f")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm512_mask_getmant_ps< @@ -6359,6 +6854,7 @@ pub unsafe fn _mm512_mask_getmant_ps< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_getmant_ps&expand=2882) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm512_maskz_getmant_ps< @@ -6390,6 +6886,7 @@ pub unsafe fn _mm512_maskz_getmant_ps< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_getmant_ps&expand=2877) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(1, 2)] pub unsafe fn _mm256_getmant_ps< @@ -6420,6 +6917,7 @@ pub unsafe fn _mm256_getmant_ps< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_getmant_ps&expand=2878) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm256_mask_getmant_ps< @@ -6452,6 +6950,7 @@ pub unsafe fn _mm256_mask_getmant_ps< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_getmant_ps&expand=2879) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(2, 3)] pub unsafe 
fn _mm256_maskz_getmant_ps< @@ -6483,6 +6982,7 @@ pub unsafe fn _mm256_maskz_getmant_ps< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_getmant_ps&expand=2874) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(1, 2)] pub unsafe fn _mm_getmant_ps< @@ -6513,6 +7013,7 @@ pub unsafe fn _mm_getmant_ps< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_getmant_ps&expand=2875) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm_mask_getmant_ps< @@ -6545,6 +7046,7 @@ pub unsafe fn _mm_mask_getmant_ps< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_getmant_ps&expand=2876) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm_maskz_getmant_ps< @@ -6576,6 +7078,7 @@ pub unsafe fn _mm_maskz_getmant_ps< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_getmant_pd&expand=2871) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(1, 2)] pub unsafe fn _mm512_getmant_pd< @@ -6612,6 +7115,7 @@ pub unsafe fn _mm512_getmant_pd< /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_getmant_pd&expand=2872) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm512_mask_getmant_pd< @@ -6644,6 +7148,7 @@ pub unsafe fn _mm512_mask_getmant_pd< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_getmant_pd&expand=2873) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm512_maskz_getmant_pd< @@ -6675,6 +7180,7 @@ pub unsafe fn _mm512_maskz_getmant_pd< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_getmant_pd&expand=2868) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(1, 2)] pub unsafe fn _mm256_getmant_pd< @@ -6705,6 +7211,7 @@ pub unsafe fn _mm256_getmant_pd< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_getmant_pd&expand=2869) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm256_mask_getmant_pd< @@ -6737,6 +7244,7 @@ pub unsafe fn _mm256_mask_getmant_pd< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_getmant_pd&expand=2870) #[inline] #[target_feature(enable = 
"avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm256_maskz_getmant_pd< @@ -6768,6 +7276,7 @@ pub unsafe fn _mm256_maskz_getmant_pd< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_getmant_pd&expand=2865) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(1, 2)] pub unsafe fn _mm_getmant_pd< @@ -6798,6 +7307,7 @@ pub unsafe fn _mm_getmant_pd< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_getmant_pd&expand=2866) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm_mask_getmant_pd< @@ -6830,6 +7340,7 @@ pub unsafe fn _mm_mask_getmant_pd< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_getmant_pd&expand=2867) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm_maskz_getmant_pd< @@ -6859,6 +7370,7 @@ pub unsafe fn _mm_maskz_getmant_pd< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_round_ps&expand=145) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddps, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn 
_mm512_add_round_ps(a: __m512, b: __m512) -> __m512 { @@ -6881,6 +7393,7 @@ pub unsafe fn _mm512_add_round_ps(a: __m512, b: __m512) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_round_ps&expand=146) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddps, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_add_round_ps( @@ -6908,6 +7421,7 @@ pub unsafe fn _mm512_mask_add_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_round_ps&expand=147) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddps, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_add_round_ps( @@ -6935,6 +7449,7 @@ pub unsafe fn _mm512_maskz_add_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_round_pd&expand=142) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddpd, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_add_round_pd(a: __m512d, b: __m512d) -> __m512d { @@ -6957,6 +7472,7 @@ pub unsafe fn _mm512_add_round_pd(a: __m512d, b: __m512d) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_round_pd&expand=143) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddpd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_add_round_pd( @@ -6984,6 +7500,7 @@ pub unsafe fn _mm512_mask_add_round_pd( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_round_pd&expand=144) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddpd, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_add_round_pd( @@ -7011,6 +7528,7 @@ pub unsafe fn _mm512_maskz_add_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_round_ps&expand=5739) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubps, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_sub_round_ps(a: __m512, b: __m512) -> __m512 { @@ -7033,6 +7551,7 @@ pub unsafe fn _mm512_sub_round_ps(a: __m512, b: __m512) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_round_ps&expand=5737) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubps, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_sub_round_ps( @@ -7060,6 +7579,7 @@ pub unsafe fn _mm512_mask_sub_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_round_ps&expand=5738) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubps, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_sub_round_ps( @@ -7087,6 +7607,7 @@ pub unsafe fn _mm512_maskz_sub_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_round_pd&expand=5736) #[inline] #[target_feature(enable = 
"avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubpd, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_sub_round_pd(a: __m512d, b: __m512d) -> __m512d { @@ -7109,6 +7630,7 @@ pub unsafe fn _mm512_sub_round_pd(a: __m512d, b: __m512d) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_round_pd&expand=5734) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubpd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_sub_round_pd( @@ -7136,6 +7658,7 @@ pub unsafe fn _mm512_mask_sub_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_round_pd&expand=5735) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubpd, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_sub_round_pd( @@ -7163,6 +7686,7 @@ pub unsafe fn _mm512_maskz_sub_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mul_round_ps&expand=3940) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulps, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_mul_round_ps(a: __m512, b: __m512) -> __m512 { @@ -7185,6 +7709,7 @@ pub unsafe fn _mm512_mul_round_ps(a: __m512, b: __m512) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mul_round_ps&expand=3938) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulps, ROUNDING = 
8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_mul_round_ps( @@ -7212,6 +7737,7 @@ pub unsafe fn _mm512_mask_mul_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mul_round_ps&expand=3939) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulps, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_mul_round_ps( @@ -7239,6 +7765,7 @@ pub unsafe fn _mm512_maskz_mul_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mul_round_pd&expand=3937) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulpd, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_mul_round_pd(a: __m512d, b: __m512d) -> __m512d { @@ -7261,6 +7788,7 @@ pub unsafe fn _mm512_mul_round_pd(a: __m512d, b: __m512d) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mul_round_pd&expand=3935) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulpd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_mul_round_pd( @@ -7288,6 +7816,7 @@ pub unsafe fn _mm512_mask_mul_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mul_round_ps&expand=3939) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulpd, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_mul_round_pd( @@ -7315,6 +7844,7 @@ pub unsafe fn _mm512_maskz_mul_round_pd( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_div_round_ps&expand=2168) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivps, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_div_round_ps(a: __m512, b: __m512) -> __m512 { @@ -7337,6 +7867,7 @@ pub unsafe fn _mm512_div_round_ps(a: __m512, b: __m512) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_div_round_ps&expand=2169) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivps, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_div_round_ps( @@ -7364,6 +7895,7 @@ pub unsafe fn _mm512_mask_div_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_div_round_ps&expand=2170) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivps, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_div_round_ps( @@ -7391,6 +7923,7 @@ pub unsafe fn _mm512_maskz_div_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_div_round_pd&expand=2165) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivpd, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_div_round_pd(a: __m512d, b: __m512d) -> __m512d { @@ -7413,6 +7946,7 @@ pub unsafe fn _mm512_div_round_pd(a: __m512d, b: __m512d) - /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_div_round_pd&expand=2166) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivpd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_div_round_pd( @@ -7440,6 +7974,7 @@ pub unsafe fn _mm512_mask_div_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_div_round_pd&expand=2167) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivpd, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_div_round_pd( @@ -7467,6 +8002,7 @@ pub unsafe fn _mm512_maskz_div_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sqrt_round_ps&expand=5377) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtps, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_sqrt_round_ps(a: __m512) -> __m512 { @@ -7488,6 +8024,7 @@ pub unsafe fn _mm512_sqrt_round_ps(a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sqrt_round_ps&expand=5375) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtps, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_sqrt_round_ps( @@ -7513,6 +8050,7 @@ pub unsafe fn _mm512_mask_sqrt_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sqrt_round_ps&expand=5376) #[inline] #[target_feature(enable = 
"avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtps, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_sqrt_round_ps(k: __mmask16, a: __m512) -> __m512 { @@ -7535,6 +8073,7 @@ pub unsafe fn _mm512_maskz_sqrt_round_ps(k: __mmask16, a: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sqrt_round_pd&expand=5374) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtpd, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_sqrt_round_pd(a: __m512d) -> __m512d { @@ -7556,6 +8095,7 @@ pub unsafe fn _mm512_sqrt_round_pd(a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sqrt_round_pd&expand=5372) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtpd, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_sqrt_round_pd( @@ -7581,6 +8121,7 @@ pub unsafe fn _mm512_mask_sqrt_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sqrt_round_pd&expand=5373) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtpd, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_sqrt_round_pd(k: __mmask8, a: __m512d) -> __m512d { @@ -7603,6 +8144,7 @@ pub unsafe fn _mm512_maskz_sqrt_round_pd(k: __mmask8, a: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmadd_round_ps&expand=2565) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132ps or vfmadd213ps or vfmadd231ps #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_fmadd_round_ps( @@ -7630,6 +8172,7 @@ pub unsafe fn _mm512_fmadd_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmadd_round_ps&expand=2566) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132ps or vfmadd213ps or vfmadd231ps #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_fmadd_round_ps( @@ -7658,6 +8201,7 @@ pub unsafe fn _mm512_mask_fmadd_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmadd_round_ps&expand=2568) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132ps or vfmadd213ps or vfmadd231ps #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_maskz_fmadd_round_ps( @@ -7687,6 +8231,7 @@ pub unsafe fn _mm512_maskz_fmadd_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmadd_round_ps&expand=2567) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132ps or vfmadd213ps or vfmadd231ps #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask3_fmadd_round_ps( @@ -7715,6 +8260,7 @@ pub unsafe fn _mm512_mask3_fmadd_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmadd_round_pd&expand=2561) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132pd or vfmadd213pd or vfmadd231pd #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_fmadd_round_pd( @@ -7742,6 +8288,7 @@ pub unsafe fn _mm512_fmadd_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmadd_round_pd&expand=2562) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132pd or vfmadd213pd or vfmadd231pd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_fmadd_round_pd( @@ -7770,6 +8317,7 @@ pub unsafe fn _mm512_mask_fmadd_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmadd_round_pd&expand=2564) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132pd or vfmadd213pd or vfmadd231pd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_maskz_fmadd_round_pd( @@ -7799,6 +8347,7 @@ pub unsafe fn _mm512_maskz_fmadd_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmadd_round_pd&expand=2563) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132pd or vfmadd213pd or vfmadd231pd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask3_fmadd_round_pd( @@ -7827,6 +8376,7 @@ pub unsafe fn _mm512_mask3_fmadd_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmsub_round_ps&expand=2651) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vfmadd, ROUNDING = 8))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_fmsub_round_ps( @@ -7855,6 +8405,7 @@ pub unsafe fn _mm512_fmsub_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmsub_round_ps&expand=2652) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_fmsub_round_ps( @@ -7884,6 +8435,7 @@ pub unsafe fn _mm512_mask_fmsub_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmsub_round_ps&expand=2654) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_maskz_fmsub_round_ps( @@ -7913,6 +8465,7 @@ pub unsafe fn _mm512_maskz_fmsub_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmsub_round_ps&expand=2653) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask3_fmsub_round_ps( @@ -7943,6 +8496,7 @@ pub unsafe fn _mm512_mask3_fmsub_round_ps( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmsub_round_pd&expand=2647) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang generates fmadd, gcc generates fmsub #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_fmsub_round_pd( @@ -7971,6 +8525,7 @@ pub unsafe fn _mm512_fmsub_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmsub_round_pd&expand=2648) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang generates fmadd, gcc generates fmsub #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_fmsub_round_pd( @@ -8000,6 +8555,7 @@ pub unsafe fn _mm512_mask_fmsub_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmsub_round_pd&expand=2650) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang generates fmadd, gcc generates fmsub #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_maskz_fmsub_round_pd( @@ -8029,6 +8585,7 @@ pub unsafe fn _mm512_maskz_fmsub_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmsub_round_pd&expand=2649) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. 
clang generates fmadd, gcc generates fmsub #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask3_fmsub_round_pd( @@ -8059,6 +8616,7 @@ pub unsafe fn _mm512_mask3_fmsub_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmaddsub_round_ps&expand=2619) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_fmaddsub_round_ps( @@ -8086,6 +8644,7 @@ pub unsafe fn _mm512_fmaddsub_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmaddsub_round_ps&expand=2620) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_fmaddsub_round_ps( @@ -8114,6 +8673,7 @@ pub unsafe fn _mm512_mask_fmaddsub_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmaddsub_round_ps&expand=2622) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_maskz_fmaddsub_round_ps( @@ -8143,6 +8703,7 @@ pub unsafe fn _mm512_maskz_fmaddsub_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmaddsub_round_ps&expand=2621) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask3_fmaddsub_round_ps( @@ -8171,6 +8732,7 @@ pub unsafe fn _mm512_mask3_fmaddsub_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmaddsub_round_pd&expand=2615) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_fmaddsub_round_pd( @@ -8198,6 +8760,7 @@ pub unsafe fn _mm512_fmaddsub_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmaddsub_round_pd&expand=2616) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_fmaddsub_round_pd( @@ -8226,6 +8789,7 @@ pub unsafe fn _mm512_mask_fmaddsub_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmaddsub_round_pd&expand=2618) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_maskz_fmaddsub_round_pd( @@ -8255,6 +8819,7 @@ pub unsafe fn _mm512_maskz_fmaddsub_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmaddsub_round_pd&expand=2617) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask3_fmaddsub_round_pd( @@ -8283,6 +8848,7 @@ pub unsafe fn _mm512_mask3_fmaddsub_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmsubadd_round_ps&expand=2699) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_fmsubadd_round_ps( @@ -8311,6 +8877,7 @@ pub unsafe fn _mm512_fmsubadd_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmsubadd_round_ps&expand=2700) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_fmsubadd_round_ps( @@ -8340,6 +8907,7 @@ pub unsafe fn _mm512_mask_fmsubadd_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmsubadd_round_ps&expand=2702) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_maskz_fmsubadd_round_ps( @@ -8369,6 +8937,7 @@ pub unsafe fn _mm512_maskz_fmsubadd_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmsubadd_round_ps&expand=2701) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask3_fmsubadd_round_ps( @@ -8399,6 +8968,7 @@ pub unsafe fn _mm512_mask3_fmsubadd_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmsubadd_round_pd&expand=2695) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_fmsubadd_round_pd( @@ -8427,6 +8997,7 @@ pub unsafe fn _mm512_fmsubadd_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmsubadd_round_pd&expand=2696) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_fmsubadd_round_pd( @@ -8456,6 +9027,7 @@ pub unsafe fn _mm512_mask_fmsubadd_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmsubadd_round_pd&expand=2698) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_maskz_fmsubadd_round_pd( @@ -8485,6 +9057,7 @@ pub unsafe fn _mm512_maskz_fmsubadd_round_pd( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmsubadd_round_pd&expand=2697) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask3_fmsubadd_round_pd( @@ -8515,6 +9088,7 @@ pub unsafe fn _mm512_mask3_fmsubadd_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fnmadd_round_ps&expand=2731) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_fnmadd_round_ps( @@ -8543,6 +9117,7 @@ pub unsafe fn _mm512_fnmadd_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fnmadd_round_ps&expand=2732) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_fnmadd_round_ps( @@ -8572,6 +9147,7 @@ pub unsafe fn _mm512_mask_fnmadd_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fnmadd_round_ps&expand=2734) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_maskz_fnmadd_round_ps( @@ -8601,6 +9177,7 @@ pub unsafe fn _mm512_maskz_fnmadd_round_ps( 
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fnmadd_round_ps&expand=2733) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask3_fnmadd_round_ps( @@ -8630,6 +9207,7 @@ pub unsafe fn _mm512_mask3_fnmadd_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fnmadd_pd&expand=2711) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_fnmadd_round_pd( @@ -8658,6 +9236,7 @@ pub unsafe fn _mm512_fnmadd_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fnmadd_round_pd&expand=2728) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_fnmadd_round_pd( @@ -8688,6 +9267,7 @@ pub unsafe fn _mm512_mask_fnmadd_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fnmadd_round_pd&expand=2730) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_maskz_fnmadd_round_pd( @@ -8717,6 +9297,7 @@ pub unsafe fn _mm512_maskz_fnmadd_round_pd( /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fnmadd_round_pd&expand=2729) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask3_fnmadd_round_pd( @@ -8746,6 +9327,7 @@ pub unsafe fn _mm512_mask3_fnmadd_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fnmsub_round_ps&expand=2779) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_fnmsub_round_ps( @@ -8774,6 +9356,7 @@ pub unsafe fn _mm512_fnmsub_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fnmsub_round_ps&expand=2780) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_fnmsub_round_ps( @@ -8804,6 +9387,7 @@ pub unsafe fn _mm512_mask_fnmsub_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fnmsub_round_ps&expand=2782) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_maskz_fnmsub_round_ps( @@ -8833,6 +9417,7 @@ pub unsafe fn _mm512_maskz_fnmsub_round_ps( /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fnmsub_round_ps&expand=2781) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask3_fnmsub_round_ps( @@ -8863,6 +9448,7 @@ pub unsafe fn _mm512_mask3_fnmsub_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fnmsub_round_pd&expand=2775) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_fnmsub_round_pd( @@ -8891,6 +9477,7 @@ pub unsafe fn _mm512_fnmsub_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fnmsub_round_pd&expand=2776) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_fnmsub_round_pd( @@ -8921,6 +9508,7 @@ pub unsafe fn _mm512_mask_fnmsub_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fnmsub_round_pd&expand=2778) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_maskz_fnmsub_round_pd( @@ -8950,6 +9538,7 @@ pub unsafe fn _mm512_maskz_fnmsub_round_pd( /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fnmsub_round_pd&expand=2777) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask3_fnmsub_round_pd( @@ -8974,6 +9563,7 @@ pub unsafe fn _mm512_mask3_fnmsub_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_round_ps&expand=3662) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxps, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_max_round_ps(a: __m512, b: __m512) -> __m512 { @@ -8990,6 +9580,7 @@ pub unsafe fn _mm512_max_round_ps(a: __m512, b: __m512) -> __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_round_ps&expand=3660) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxps, SAE = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_max_round_ps( @@ -9011,6 +9602,7 @@ pub unsafe fn _mm512_mask_max_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_round_ps&expand=3661) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxps, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_max_round_ps( @@ -9032,6 +9624,7 @@ pub unsafe fn _mm512_maskz_max_round_ps( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_round_pd&expand=3659) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxpd, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_max_round_pd(a: __m512d, b: __m512d) -> __m512d { @@ -9048,6 +9641,7 @@ pub unsafe fn _mm512_max_round_pd(a: __m512d, b: __m512d) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_round_pd&expand=3657) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxpd, SAE = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_max_round_pd( @@ -9069,6 +9663,7 @@ pub unsafe fn _mm512_mask_max_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_round_pd&expand=3658) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxpd, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_max_round_pd( @@ -9090,6 +9685,7 @@ pub unsafe fn _mm512_maskz_max_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_round_ps&expand=3776) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminps, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_min_round_ps(a: __m512, b: __m512) -> __m512 { @@ -9106,6 +9702,7 @@ pub unsafe fn _mm512_min_round_ps(a: __m512, b: __m512) -> __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_round_ps&expand=3774) 
#[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminps, SAE = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_min_round_ps( @@ -9127,6 +9724,7 @@ pub unsafe fn _mm512_mask_min_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_round_ps&expand=3775) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminps, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_min_round_ps( @@ -9148,6 +9746,7 @@ pub unsafe fn _mm512_maskz_min_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_round_pd&expand=3773) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminpd, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_min_round_pd(a: __m512d, b: __m512d) -> __m512d { @@ -9164,6 +9763,7 @@ pub unsafe fn _mm512_min_round_pd(a: __m512d, b: __m512d) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_round_pd&expand=3771) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminpd, SAE = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_min_round_pd( @@ -9185,6 +9785,7 @@ pub unsafe fn _mm512_mask_min_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_round_pd&expand=3772) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminpd, SAE = 8))] 
#[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_min_round_pd( @@ -9206,6 +9807,7 @@ pub unsafe fn _mm512_maskz_min_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_getexp_round_ps&expand=2850) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpps, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_getexp_round_ps(a: __m512) -> __m512 { @@ -9222,6 +9824,7 @@ pub unsafe fn _mm512_getexp_round_ps(a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_getexp_round_ps&expand=2851) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpps, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_getexp_round_ps( @@ -9242,6 +9845,7 @@ pub unsafe fn _mm512_mask_getexp_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_getexp_round_ps&expand=2852) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpps, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_getexp_round_ps(k: __mmask16, a: __m512) -> __m512 { @@ -9258,6 +9862,7 @@ pub unsafe fn _mm512_maskz_getexp_round_ps(k: __mmask16, a: __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_getexp_round_pd&expand=2847) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexppd, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_getexp_round_pd(a: __m512d) -> __m512d { @@ -9274,6 
+9879,7 @@ pub unsafe fn _mm512_getexp_round_pd(a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_getexp_round_pd&expand=2848) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexppd, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_getexp_round_pd( @@ -9294,6 +9900,7 @@ pub unsafe fn _mm512_mask_getexp_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_getexp_round_pd&expand=2849) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexppd, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_getexp_round_pd(k: __mmask8, a: __m512d) -> __m512d { @@ -9316,6 +9923,7 @@ pub unsafe fn _mm512_maskz_getexp_round_pd(k: __mmask8, a: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_roundscale_round_ps&expand=4790) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(1, 2)] pub unsafe fn _mm512_roundscale_round_ps(a: __m512) -> __m512 { @@ -9339,6 +9947,7 @@ pub unsafe fn _mm512_roundscale_round_ps(a: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_roundscale_round_ps&expand=4788) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm512_mask_roundscale_round_ps( @@ -9366,6 +9975,7 @@ pub unsafe fn 
_mm512_mask_roundscale_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_roundscale_round_ps&expand=4789) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm512_maskz_roundscale_round_ps( @@ -9392,6 +10002,7 @@ pub unsafe fn _mm512_maskz_roundscale_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_roundscale_round_pd&expand=4787) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(1, 2)] pub unsafe fn _mm512_roundscale_round_pd(a: __m512d) -> __m512d { @@ -9415,6 +10026,7 @@ pub unsafe fn _mm512_roundscale_round_pd(a: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_roundscale_round_pd&expand=4785) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm512_mask_roundscale_round_pd( @@ -9442,6 +10054,7 @@ pub unsafe fn _mm512_mask_roundscale_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_roundscale_round_pd&expand=4786) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm512_maskz_roundscale_round_pd( @@ -9468,6 +10081,7 @@ pub unsafe fn _mm512_maskz_roundscale_round_pd( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_scalef_round_ps&expand=4889) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefps, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_scalef_round_ps(a: __m512, b: __m512) -> __m512 { @@ -9491,6 +10105,7 @@ pub unsafe fn _mm512_scalef_round_ps(a: __m512, b: __m512) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_scalef_round_ps&expand=4887) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefps, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_scalef_round_ps( @@ -9519,6 +10134,7 @@ pub unsafe fn _mm512_mask_scalef_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_scalef_round_ps&expand=4888) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefps, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_scalef_round_ps( @@ -9546,6 +10162,7 @@ pub unsafe fn _mm512_maskz_scalef_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_scalef_round_pd&expand=4886) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefpd, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_scalef_round_pd(a: __m512d, b: __m512d) -> __m512d { @@ -9569,6 +10186,7 @@ pub unsafe fn _mm512_scalef_round_pd(a: __m512d, b: __m512d /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_scalef_round_pd&expand=4884) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefpd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_scalef_round_pd( @@ -9597,6 +10215,7 @@ pub unsafe fn _mm512_mask_scalef_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_scalef_round_pd&expand=4885) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefpd, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_scalef_round_pd( @@ -9618,6 +10237,7 @@ pub unsafe fn _mm512_maskz_scalef_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fixupimm_round_ps&expand=2505) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm512_fixupimm_round_ps( @@ -9640,6 +10260,7 @@ pub unsafe fn _mm512_fixupimm_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fixupimm_round_ps&expand=2506) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(4, 5)] pub unsafe fn _mm512_mask_fixupimm_round_ps( @@ -9663,6 +10284,7 @@ pub unsafe fn _mm512_mask_fixupimm_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fixupimm_round_ps&expand=2507) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(4, 5)] pub unsafe fn _mm512_maskz_fixupimm_round_ps( @@ -9686,6 +10308,7 @@ pub unsafe fn _mm512_maskz_fixupimm_round_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fixupimm_round_pd&expand=2502) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm512_fixupimm_round_pd( @@ -9708,6 +10331,7 @@ pub unsafe fn _mm512_fixupimm_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fixupimm_round_pd&expand=2503) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(4, 5)] pub unsafe fn _mm512_mask_fixupimm_round_pd( @@ -9731,6 +10355,7 @@ pub unsafe fn _mm512_mask_fixupimm_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fixupimm_round_pd&expand=2504) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(4, 5)] pub unsafe fn _mm512_maskz_fixupimm_round_pd( @@ -9763,6 +10388,7 @@ pub unsafe fn _mm512_maskz_fixupimm_round_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_getmant_round_ps&expand=2886) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vgetmantps, NORM = 0, SIGN = 0, SAE = 4))] #[rustc_legacy_const_generics(1, 2, 3)] pub unsafe fn _mm512_getmant_round_ps< @@ -9796,6 +10422,7 @@ pub unsafe fn _mm512_getmant_round_ps< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_getmant_round_ps&expand=2887) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0, SAE = 4))] #[rustc_legacy_const_generics(3, 4, 5)] pub unsafe fn _mm512_mask_getmant_round_ps< @@ -9831,6 +10458,7 @@ pub unsafe fn _mm512_mask_getmant_round_ps< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_getmant_round_ps&expand=2888) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0, SAE = 4))] #[rustc_legacy_const_generics(2, 3, 4)] pub unsafe fn _mm512_maskz_getmant_round_ps< @@ -9865,6 +10493,7 @@ pub unsafe fn _mm512_maskz_getmant_round_ps< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_getmant_round_pd&expand=2883) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0, SAE = 4))] #[rustc_legacy_const_generics(1, 2, 3)] pub unsafe fn _mm512_getmant_round_pd< @@ -9898,6 +10527,7 @@ pub unsafe fn _mm512_getmant_round_pd< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_getmant_round_pd&expand=2884) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0, SAE = 4))] #[rustc_legacy_const_generics(3, 
4, 5)] pub unsafe fn _mm512_mask_getmant_round_pd< @@ -9933,6 +10563,7 @@ pub unsafe fn _mm512_mask_getmant_round_pd< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_512_maskz_getmant_round_pd&expand=2885) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0, SAE = 4))] #[rustc_legacy_const_generics(2, 3, 4)] pub unsafe fn _mm512_maskz_getmant_round_pd< @@ -9957,6 +10588,7 @@ pub unsafe fn _mm512_maskz_getmant_round_pd< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtps_epi32&expand=1737) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2dq))] pub unsafe fn _mm512_cvtps_epi32(a: __m512) -> __m512i { transmute(vcvtps2dq( @@ -9972,6 +10604,7 @@ pub unsafe fn _mm512_cvtps_epi32(a: __m512) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtps_epi32&expand=1738) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2dq))] pub unsafe fn _mm512_mask_cvtps_epi32(src: __m512i, k: __mmask16, a: __m512) -> __m512i { transmute(vcvtps2dq( @@ -9987,6 +10620,7 @@ pub unsafe fn _mm512_mask_cvtps_epi32(src: __m512i, k: __mmask16, a: __m512) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtps_epi32&expand=1739) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2dq))] pub unsafe fn _mm512_maskz_cvtps_epi32(k: __mmask16, a: __m512) -> __m512i { transmute(vcvtps2dq( @@ -10002,6 +10636,7 @@ pub unsafe fn 
_mm512_maskz_cvtps_epi32(k: __mmask16, a: __m512) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtps_epi32&expand=1735) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2dq))] pub unsafe fn _mm256_mask_cvtps_epi32(src: __m256i, k: __mmask8, a: __m256) -> __m256i { let convert = _mm256_cvtps_epi32(a); @@ -10013,6 +10648,7 @@ pub unsafe fn _mm256_mask_cvtps_epi32(src: __m256i, k: __mmask8, a: __m256) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtps_epi32&expand=1736) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2dq))] pub unsafe fn _mm256_maskz_cvtps_epi32(k: __mmask8, a: __m256) -> __m256i { let convert = _mm256_cvtps_epi32(a); @@ -10025,6 +10661,7 @@ pub unsafe fn _mm256_maskz_cvtps_epi32(k: __mmask8, a: __m256) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtps_epi32&expand=1732) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2dq))] pub unsafe fn _mm_mask_cvtps_epi32(src: __m128i, k: __mmask8, a: __m128) -> __m128i { let convert = _mm_cvtps_epi32(a); @@ -10036,6 +10673,7 @@ pub unsafe fn _mm_mask_cvtps_epi32(src: __m128i, k: __mmask8, a: __m128) -> __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtps_epi32&expand=1733) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2dq))] pub unsafe fn _mm_maskz_cvtps_epi32(k: 
__mmask8, a: __m128) -> __m128i { let convert = _mm_cvtps_epi32(a); @@ -10048,6 +10686,7 @@ pub unsafe fn _mm_maskz_cvtps_epi32(k: __mmask8, a: __m128) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtps_epu32&expand=1755) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2udq))] pub unsafe fn _mm512_cvtps_epu32(a: __m512) -> __m512i { transmute(vcvtps2udq( @@ -10063,6 +10702,7 @@ pub unsafe fn _mm512_cvtps_epu32(a: __m512) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtps_epu32&expand=1756) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2udq))] pub unsafe fn _mm512_mask_cvtps_epu32(src: __m512i, k: __mmask16, a: __m512) -> __m512i { transmute(vcvtps2udq( @@ -10078,6 +10718,7 @@ pub unsafe fn _mm512_mask_cvtps_epu32(src: __m512i, k: __mmask16, a: __m512) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundps_epu32&expand=1343) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2udq))] pub unsafe fn _mm512_maskz_cvtps_epu32(k: __mmask16, a: __m512) -> __m512i { transmute(vcvtps2udq( @@ -10093,6 +10734,7 @@ pub unsafe fn _mm512_maskz_cvtps_epu32(k: __mmask16, a: __m512) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtps_epu32&expand=1752) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2udq))] pub unsafe fn _mm256_cvtps_epu32(a: __m256) -> __m256i { 
transmute(vcvtps2udq256( @@ -10107,6 +10749,7 @@ pub unsafe fn _mm256_cvtps_epu32(a: __m256) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtps_epu32&expand=1753) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2udq))] pub unsafe fn _mm256_mask_cvtps_epu32(src: __m256i, k: __mmask8, a: __m256) -> __m256i { transmute(vcvtps2udq256(a.as_f32x8(), src.as_u32x8(), k)) @@ -10117,6 +10760,7 @@ pub unsafe fn _mm256_mask_cvtps_epu32(src: __m256i, k: __mmask8, a: __m256) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtps_epu32&expand=1754) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2udq))] pub unsafe fn _mm256_maskz_cvtps_epu32(k: __mmask8, a: __m256) -> __m256i { transmute(vcvtps2udq256( @@ -10131,6 +10775,7 @@ pub unsafe fn _mm256_maskz_cvtps_epu32(k: __mmask8, a: __m256) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_epu32&expand=1749) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2udq))] pub unsafe fn _mm_cvtps_epu32(a: __m128) -> __m128i { transmute(vcvtps2udq128( @@ -10145,6 +10790,7 @@ pub unsafe fn _mm_cvtps_epu32(a: __m128) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtps_epu32&expand=1750) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2udq))] pub unsafe fn _mm_mask_cvtps_epu32(src: __m128i, k: __mmask8, a: 
__m128) -> __m128i { transmute(vcvtps2udq128(a.as_f32x4(), src.as_u32x4(), k)) @@ -10155,6 +10801,7 @@ pub unsafe fn _mm_mask_cvtps_epu32(src: __m128i, k: __mmask8, a: __m128) -> __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtps_epu32&expand=1751) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2udq))] pub unsafe fn _mm_maskz_cvtps_epu32(k: __mmask8, a: __m128) -> __m128i { transmute(vcvtps2udq128( @@ -10169,6 +10816,7 @@ pub unsafe fn _mm_maskz_cvtps_epu32(k: __mmask8, a: __m128) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtps_pd&expand=1769) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2pd))] pub unsafe fn _mm512_cvtps_pd(a: __m256) -> __m512d { transmute(vcvtps2pd( @@ -10184,6 +10832,7 @@ pub unsafe fn _mm512_cvtps_pd(a: __m256) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtps_pd&expand=1770) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2pd))] pub unsafe fn _mm512_mask_cvtps_pd(src: __m512d, k: __mmask8, a: __m256) -> __m512d { transmute(vcvtps2pd( @@ -10199,6 +10848,7 @@ pub unsafe fn _mm512_mask_cvtps_pd(src: __m512d, k: __mmask8, a: __m256) -> __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtps_pd&expand=1771) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2pd))] pub unsafe fn _mm512_maskz_cvtps_pd(k: __mmask8, a: __m256) -> __m512d { 
transmute(vcvtps2pd( @@ -10214,6 +10864,7 @@ pub unsafe fn _mm512_maskz_cvtps_pd(k: __mmask8, a: __m256) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtpslo_pd&expand=1784) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2pd))] pub unsafe fn _mm512_cvtpslo_pd(v2: __m512) -> __m512d { transmute(vcvtps2pd( @@ -10229,6 +10880,7 @@ pub unsafe fn _mm512_cvtpslo_pd(v2: __m512) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtpslo_pd&expand=1785) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2pd))] pub unsafe fn _mm512_mask_cvtpslo_pd(src: __m512d, k: __mmask8, v2: __m512) -> __m512d { transmute(vcvtps2pd( @@ -10244,6 +10896,7 @@ pub unsafe fn _mm512_mask_cvtpslo_pd(src: __m512d, k: __mmask8, v2: __m512) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtpd_ps&expand=1712) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2ps))] pub unsafe fn _mm512_cvtpd_ps(a: __m512d) -> __m256 { transmute(vcvtpd2ps( @@ -10259,6 +10912,7 @@ pub unsafe fn _mm512_cvtpd_ps(a: __m512d) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtpd_ps&expand=1713) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2ps))] pub unsafe fn _mm512_mask_cvtpd_ps(src: __m256, k: __mmask8, a: __m512d) -> __m256 { transmute(vcvtpd2ps( @@ -10274,6 +10928,7 @@ pub unsafe fn _mm512_mask_cvtpd_ps(src: __m256, k: 
__mmask8, a: __m512d) -> __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtpd_ps&expand=1714) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2ps))] pub unsafe fn _mm512_maskz_cvtpd_ps(k: __mmask8, a: __m512d) -> __m256 { transmute(vcvtpd2ps( @@ -10289,6 +10944,7 @@ pub unsafe fn _mm512_maskz_cvtpd_ps(k: __mmask8, a: __m512d) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtpd_ps&expand=1710) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2ps))] pub unsafe fn _mm256_mask_cvtpd_ps(src: __m128, k: __mmask8, a: __m256d) -> __m128 { let convert = _mm256_cvtpd_ps(a); @@ -10300,6 +10956,7 @@ pub unsafe fn _mm256_mask_cvtpd_ps(src: __m128, k: __mmask8, a: __m256d) -> __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtpd_ps&expand=1711) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2ps))] pub unsafe fn _mm256_maskz_cvtpd_ps(k: __mmask8, a: __m256d) -> __m128 { let convert = _mm256_cvtpd_ps(a); @@ -10312,6 +10969,7 @@ pub unsafe fn _mm256_maskz_cvtpd_ps(k: __mmask8, a: __m256d) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtpd_ps&expand=1707) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2ps))] pub unsafe fn _mm_mask_cvtpd_ps(src: __m128, k: __mmask8, a: __m128d) -> __m128 { let convert = _mm_cvtpd_ps(a); @@ -10323,6 +10981,7 @@ pub unsafe 
fn _mm_mask_cvtpd_ps(src: __m128, k: __mmask8, a: __m128d) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtpd_ps&expand=1708) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2ps))] pub unsafe fn _mm_maskz_cvtpd_ps(k: __mmask8, a: __m128d) -> __m128 { let convert = _mm_cvtpd_ps(a); @@ -10335,6 +10994,7 @@ pub unsafe fn _mm_maskz_cvtpd_ps(k: __mmask8, a: __m128d) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtpd_epi32&expand=1675) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2dq))] pub unsafe fn _mm512_cvtpd_epi32(a: __m512d) -> __m256i { transmute(vcvtpd2dq( @@ -10350,6 +11010,7 @@ pub unsafe fn _mm512_cvtpd_epi32(a: __m512d) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtpd_epi32&expand=1676) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2dq))] pub unsafe fn _mm512_mask_cvtpd_epi32(src: __m256i, k: __mmask8, a: __m512d) -> __m256i { transmute(vcvtpd2dq( @@ -10365,6 +11026,7 @@ pub unsafe fn _mm512_mask_cvtpd_epi32(src: __m256i, k: __mmask8, a: __m512d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtpd_epi32&expand=1677) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2dq))] pub unsafe fn _mm512_maskz_cvtpd_epi32(k: __mmask8, a: __m512d) -> __m256i { transmute(vcvtpd2dq( @@ -10380,6 +11042,7 @@ pub unsafe fn _mm512_maskz_cvtpd_epi32(k: 
__mmask8, a: __m512d) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtpd_epi32&expand=1673) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2dq))] pub unsafe fn _mm256_mask_cvtpd_epi32(src: __m128i, k: __mmask8, a: __m256d) -> __m128i { let convert = _mm256_cvtpd_epi32(a); @@ -10391,6 +11054,7 @@ pub unsafe fn _mm256_mask_cvtpd_epi32(src: __m128i, k: __mmask8, a: __m256d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtpd_epi32&expand=1674) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2dq))] pub unsafe fn _mm256_maskz_cvtpd_epi32(k: __mmask8, a: __m256d) -> __m128i { let convert = _mm256_cvtpd_epi32(a); @@ -10406,6 +11070,7 @@ pub unsafe fn _mm256_maskz_cvtpd_epi32(k: __mmask8, a: __m256d) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtpd_epi32&expand=1670) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2dq))] pub unsafe fn _mm_mask_cvtpd_epi32(src: __m128i, k: __mmask8, a: __m128d) -> __m128i { let convert = _mm_cvtpd_epi32(a); @@ -10417,6 +11082,7 @@ pub unsafe fn _mm_mask_cvtpd_epi32(src: __m128i, k: __mmask8, a: __m128d) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtpd_epi32&expand=1671) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2dq))] pub unsafe fn _mm_maskz_cvtpd_epi32(k: __mmask8, a: __m128d) -> __m128i 
{ let convert = _mm_cvtpd_epi32(a); @@ -10432,6 +11098,7 @@ pub unsafe fn _mm_maskz_cvtpd_epi32(k: __mmask8, a: __m128d) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtpd_epu32&expand=1693) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2udq))] pub unsafe fn _mm512_cvtpd_epu32(a: __m512d) -> __m256i { transmute(vcvtpd2udq( @@ -10447,6 +11114,7 @@ pub unsafe fn _mm512_cvtpd_epu32(a: __m512d) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtpd_epu32&expand=1694) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2udq))] pub unsafe fn _mm512_mask_cvtpd_epu32(src: __m256i, k: __mmask8, a: __m512d) -> __m256i { transmute(vcvtpd2udq( @@ -10462,6 +11130,7 @@ pub unsafe fn _mm512_mask_cvtpd_epu32(src: __m256i, k: __mmask8, a: __m512d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtpd_epu32&expand=1695) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2udq))] pub unsafe fn _mm512_maskz_cvtpd_epu32(k: __mmask8, a: __m512d) -> __m256i { transmute(vcvtpd2udq( @@ -10477,6 +11146,7 @@ pub unsafe fn _mm512_maskz_cvtpd_epu32(k: __mmask8, a: __m512d) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtpd_epu32&expand=1690) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2udq))] pub unsafe fn _mm256_cvtpd_epu32(a: __m256d) -> __m128i { transmute(vcvtpd2udq256( @@ -10491,6 
+11161,7 @@ pub unsafe fn _mm256_cvtpd_epu32(a: __m256d) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtpd_epu32&expand=1691) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2udq))] pub unsafe fn _mm256_mask_cvtpd_epu32(src: __m128i, k: __mmask8, a: __m256d) -> __m128i { transmute(vcvtpd2udq256(a.as_f64x4(), src.as_u32x4(), k)) @@ -10501,6 +11172,7 @@ pub unsafe fn _mm256_mask_cvtpd_epu32(src: __m128i, k: __mmask8, a: __m256d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtpd_epu32&expand=1692) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2udq))] pub unsafe fn _mm256_maskz_cvtpd_epu32(k: __mmask8, a: __m256d) -> __m128i { transmute(vcvtpd2udq256( @@ -10515,6 +11187,7 @@ pub unsafe fn _mm256_maskz_cvtpd_epu32(k: __mmask8, a: __m256d) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_epu32&expand=1687) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2udq))] pub unsafe fn _mm_cvtpd_epu32(a: __m128d) -> __m128i { transmute(vcvtpd2udq128( @@ -10529,6 +11202,7 @@ pub unsafe fn _mm_cvtpd_epu32(a: __m128d) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtpd_epu32&expand=1688) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2udq))] pub unsafe fn _mm_mask_cvtpd_epu32(src: __m128i, k: __mmask8, a: __m128d) -> __m128i { 
transmute(vcvtpd2udq128(a.as_f64x2(), src.as_u32x4(), k)) @@ -10539,6 +11213,7 @@ pub unsafe fn _mm_mask_cvtpd_epu32(src: __m128i, k: __mmask8, a: __m128d) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtpd_epu32&expand=1689) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2udq))] pub unsafe fn _mm_maskz_cvtpd_epu32(k: __mmask8, a: __m128d) -> __m128i { transmute(vcvtpd2udq128( @@ -10553,6 +11228,7 @@ pub unsafe fn _mm_maskz_cvtpd_epu32(k: __mmask8, a: __m128d) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtpd_pslo&expand=1715) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2ps))] pub unsafe fn _mm512_cvtpd_pslo(v2: __m512d) -> __m512 { let r: f32x8 = vcvtpd2ps( @@ -10573,6 +11249,7 @@ pub unsafe fn _mm512_cvtpd_pslo(v2: __m512d) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtpd_pslo&expand=1716) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2ps))] pub unsafe fn _mm512_mask_cvtpd_pslo(src: __m512, k: __mmask8, v2: __m512d) -> __m512 { let r: f32x8 = vcvtpd2ps( @@ -10593,6 +11270,7 @@ pub unsafe fn _mm512_mask_cvtpd_pslo(src: __m512, k: __mmask8, v2: __m512d) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi8_epi32&expand=1535) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbd))] pub unsafe fn _mm512_cvtepi8_epi32(a: __m128i) -> __m512i { let a = 
a.as_i8x16(); @@ -10604,6 +11282,7 @@ pub unsafe fn _mm512_cvtepi8_epi32(a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi8_epi32&expand=1536) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbd))] pub unsafe fn _mm512_mask_cvtepi8_epi32(src: __m512i, k: __mmask16, a: __m128i) -> __m512i { let convert = _mm512_cvtepi8_epi32(a).as_i32x16(); @@ -10615,6 +11294,7 @@ pub unsafe fn _mm512_mask_cvtepi8_epi32(src: __m512i, k: __mmask16, a: __m128i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi8_epi32&expand=1537) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbd))] pub unsafe fn _mm512_maskz_cvtepi8_epi32(k: __mmask16, a: __m128i) -> __m512i { let convert = _mm512_cvtepi8_epi32(a).as_i32x16(); @@ -10627,6 +11307,7 @@ pub unsafe fn _mm512_maskz_cvtepi8_epi32(k: __mmask16, a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi8_epi32&expand=1533) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbd))] pub unsafe fn _mm256_mask_cvtepi8_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepi8_epi32(a).as_i32x8(); @@ -10638,6 +11319,7 @@ pub unsafe fn _mm256_mask_cvtepi8_epi32(src: __m256i, k: __mmask8, a: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi8_epi32&expand=1534) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = 
"111137")] #[cfg_attr(test, assert_instr(vpmovsxbd))] pub unsafe fn _mm256_maskz_cvtepi8_epi32(k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepi8_epi32(a).as_i32x8(); @@ -10650,6 +11332,7 @@ pub unsafe fn _mm256_maskz_cvtepi8_epi32(k: __mmask8, a: __m128i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi8_epi32&expand=1530) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbd))] pub unsafe fn _mm_mask_cvtepi8_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepi8_epi32(a).as_i32x4(); @@ -10661,6 +11344,7 @@ pub unsafe fn _mm_mask_cvtepi8_epi32(src: __m128i, k: __mmask8, a: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi8_epi32&expand=1531) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbd))] pub unsafe fn _mm_maskz_cvtepi8_epi32(k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepi8_epi32(a).as_i32x4(); @@ -10673,6 +11357,7 @@ pub unsafe fn _mm_maskz_cvtepi8_epi32(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi8_epi64&expand=1544) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbq))] pub unsafe fn _mm512_cvtepi8_epi64(a: __m128i) -> __m512i { let a = a.as_i8x16(); @@ -10685,6 +11370,7 @@ pub unsafe fn _mm512_cvtepi8_epi64(a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi8_epi64&expand=1545) #[inline] #[target_feature(enable 
= "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbq))] pub unsafe fn _mm512_mask_cvtepi8_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { let convert = _mm512_cvtepi8_epi64(a).as_i64x8(); @@ -10696,6 +11382,7 @@ pub unsafe fn _mm512_mask_cvtepi8_epi64(src: __m512i, k: __mmask8, a: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi8_epi64&expand=1546) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbq))] pub unsafe fn _mm512_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m512i { let convert = _mm512_cvtepi8_epi64(a).as_i64x8(); @@ -10708,6 +11395,7 @@ pub unsafe fn _mm512_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi8_epi64&expand=1542) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbq))] pub unsafe fn _mm256_mask_cvtepi8_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepi8_epi64(a).as_i64x4(); @@ -10719,6 +11407,7 @@ pub unsafe fn _mm256_mask_cvtepi8_epi64(src: __m256i, k: __mmask8, a: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi8_epi64&expand=1543) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbq))] pub unsafe fn _mm256_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepi8_epi64(a).as_i64x4(); @@ -10731,6 +11420,7 @@ pub unsafe fn _mm256_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m256i { /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi8_epi64&expand=1539) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbq))] pub unsafe fn _mm_mask_cvtepi8_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepi8_epi64(a).as_i64x2(); @@ -10742,6 +11432,7 @@ pub unsafe fn _mm_mask_cvtepi8_epi64(src: __m128i, k: __mmask8, a: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi8_epi64&expand=1540) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxbq))] pub unsafe fn _mm_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepi8_epi64(a).as_i64x2(); @@ -10754,6 +11445,7 @@ pub unsafe fn _mm_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu8_epi32&expand=1621) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbd))] pub unsafe fn _mm512_cvtepu8_epi32(a: __m128i) -> __m512i { let a = a.as_u8x16(); @@ -10765,6 +11457,7 @@ pub unsafe fn _mm512_cvtepu8_epi32(a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu8_epi32&expand=1622) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbd))] pub unsafe fn _mm512_mask_cvtepu8_epi32(src: __m512i, k: __mmask16, a: __m128i) -> __m512i { let convert = _mm512_cvtepu8_epi32(a).as_i32x16(); @@ -10776,6 +11469,7 @@ 
pub unsafe fn _mm512_mask_cvtepu8_epi32(src: __m512i, k: __mmask16, a: __m128i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepu8_epi32&expand=1623) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbd))] pub unsafe fn _mm512_maskz_cvtepu8_epi32(k: __mmask16, a: __m128i) -> __m512i { let convert = _mm512_cvtepu8_epi32(a).as_i32x16(); @@ -10788,6 +11482,7 @@ pub unsafe fn _mm512_maskz_cvtepu8_epi32(k: __mmask16, a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepu8_epi32&expand=1619) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbd))] pub unsafe fn _mm256_mask_cvtepu8_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepu8_epi32(a).as_i32x8(); @@ -10799,6 +11494,7 @@ pub unsafe fn _mm256_mask_cvtepu8_epi32(src: __m256i, k: __mmask8, a: __m128i) - /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/IntrinsicsGuide/#text=_mm256_maskz_cvtepu8_epi32&expand=1620) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbd))] pub unsafe fn _mm256_maskz_cvtepu8_epi32(k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepu8_epi32(a).as_i32x8(); @@ -10811,6 +11507,7 @@ pub unsafe fn _mm256_maskz_cvtepu8_epi32(k: __mmask8, a: __m128i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepu8_epi32&expand=1616) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpmovzxbd))] pub unsafe fn _mm_mask_cvtepu8_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepu8_epi32(a).as_i32x4(); @@ -10822,6 +11519,7 @@ pub unsafe fn _mm_mask_cvtepu8_epi32(src: __m128i, k: __mmask8, a: __m128i) -> _ /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/IntrinsicsGuide/#text=_mm_maskz_cvtepu8_epi32&expand=1617) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbd))] pub unsafe fn _mm_maskz_cvtepu8_epi32(k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepu8_epi32(a).as_i32x4(); @@ -10834,6 +11532,7 @@ pub unsafe fn _mm_maskz_cvtepu8_epi32(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu8_epi64&expand=1630) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbq))] pub unsafe fn _mm512_cvtepu8_epi64(a: __m128i) -> __m512i { let a = a.as_u8x16(); @@ -10846,6 +11545,7 @@ pub unsafe fn _mm512_cvtepu8_epi64(a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu8_epi64&expand=1631) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbq))] pub unsafe fn _mm512_mask_cvtepu8_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { let convert = _mm512_cvtepu8_epi64(a).as_i64x8(); @@ -10857,6 +11557,7 @@ pub unsafe fn _mm512_mask_cvtepu8_epi64(src: __m512i, k: __mmask8, a: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepu8_epi64&expand=1632) #[inline] #[target_feature(enable = 
"avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbq))] pub unsafe fn _mm512_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m512i { let convert = _mm512_cvtepu8_epi64(a).as_i64x8(); @@ -10869,6 +11570,7 @@ pub unsafe fn _mm512_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepu8_epi64&expand=1628) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbq))] pub unsafe fn _mm256_mask_cvtepu8_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepu8_epi64(a).as_i64x4(); @@ -10880,6 +11582,7 @@ pub unsafe fn _mm256_mask_cvtepu8_epi64(src: __m256i, k: __mmask8, a: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepu8_epi64&expand=1629) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbq))] pub unsafe fn _mm256_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepu8_epi64(a).as_i64x4(); @@ -10892,6 +11595,7 @@ pub unsafe fn _mm256_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepu8_epi64&expand=1625) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbq))] pub unsafe fn _mm_mask_cvtepu8_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepu8_epi64(a).as_i64x2(); @@ -10903,6 +11607,7 @@ pub unsafe fn _mm_mask_cvtepu8_epi64(src: __m128i, k: __mmask8, a: __m128i) -> _ /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepu8_epi64&expand=1626) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxbq))] pub unsafe fn _mm_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepu8_epi64(a).as_i64x2(); @@ -10915,6 +11620,7 @@ pub unsafe fn _mm_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi16_epi32&expand=1389) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxwd))] pub unsafe fn _mm512_cvtepi16_epi32(a: __m256i) -> __m512i { let a = a.as_i16x16(); @@ -10926,6 +11632,7 @@ pub unsafe fn _mm512_cvtepi16_epi32(a: __m256i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi16_epi32&expand=1390) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxwd))] pub unsafe fn _mm512_mask_cvtepi16_epi32(src: __m512i, k: __mmask16, a: __m256i) -> __m512i { let convert = _mm512_cvtepi16_epi32(a).as_i32x16(); @@ -10937,6 +11644,7 @@ pub unsafe fn _mm512_mask_cvtepi16_epi32(src: __m512i, k: __mmask16, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi16_epi32&expand=1391) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxwd))] pub unsafe fn _mm512_maskz_cvtepi16_epi32(k: __mmask16, a: __m256i) -> __m512i { let convert = _mm512_cvtepi16_epi32(a).as_i32x16(); @@ -10949,6 +11657,7 @@ 
pub unsafe fn _mm512_maskz_cvtepi16_epi32(k: __mmask16, a: __m256i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi16_epi32&expand=1387) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxwd))] pub unsafe fn _mm256_mask_cvtepi16_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepi16_epi32(a).as_i32x8(); @@ -10960,6 +11669,7 @@ pub unsafe fn _mm256_mask_cvtepi16_epi32(src: __m256i, k: __mmask8, a: __m128i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi16_epi32&expand=1388) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxwd))] pub unsafe fn _mm256_maskz_cvtepi16_epi32(k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepi16_epi32(a).as_i32x8(); @@ -10972,6 +11682,7 @@ pub unsafe fn _mm256_maskz_cvtepi16_epi32(k: __mmask8, a: __m128i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi16_epi32&expand=1384) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxwd))] pub unsafe fn _mm_mask_cvtepi16_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepi16_epi32(a).as_i32x4(); @@ -10983,6 +11694,7 @@ pub unsafe fn _mm_mask_cvtepi16_epi32(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi16_epi32&expand=1385) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vpmovsxwd))] pub unsafe fn _mm_maskz_cvtepi16_epi32(k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepi16_epi32(a).as_i32x4(); @@ -10995,6 +11707,7 @@ pub unsafe fn _mm_maskz_cvtepi16_epi32(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi16_epi64&expand=1398) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxwq))] pub unsafe fn _mm512_cvtepi16_epi64(a: __m128i) -> __m512i { let a = a.as_i16x8(); @@ -11006,6 +11719,7 @@ pub unsafe fn _mm512_cvtepi16_epi64(a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi16_epi64&expand=1399) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxwq))] pub unsafe fn _mm512_mask_cvtepi16_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { let convert = _mm512_cvtepi16_epi64(a).as_i64x8(); @@ -11017,6 +11731,7 @@ pub unsafe fn _mm512_mask_cvtepi16_epi64(src: __m512i, k: __mmask8, a: __m128i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi16_epi64&expand=1400) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxwq))] pub unsafe fn _mm512_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m512i { let convert = _mm512_cvtepi16_epi64(a).as_i64x8(); @@ -11029,6 +11744,7 @@ pub unsafe fn _mm512_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi16_epi64&expand=1396) #[inline] #[target_feature(enable = 
"avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxwq))] pub unsafe fn _mm256_mask_cvtepi16_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepi16_epi64(a).as_i64x4(); @@ -11040,6 +11756,7 @@ pub unsafe fn _mm256_mask_cvtepi16_epi64(src: __m256i, k: __mmask8, a: __m128i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi16_epi64&expand=1397) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxwq))] pub unsafe fn _mm256_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepi16_epi64(a).as_i64x4(); @@ -11052,6 +11769,7 @@ pub unsafe fn _mm256_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi16_epi64&expand=1393) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxwq))] pub unsafe fn _mm_mask_cvtepi16_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepi16_epi64(a).as_i64x2(); @@ -11063,6 +11781,7 @@ pub unsafe fn _mm_mask_cvtepi16_epi64(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi16_epi64&expand=1394) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxwq))] pub unsafe fn _mm_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepi16_epi64(a).as_i64x2(); @@ -11075,6 +11794,7 @@ pub unsafe fn _mm_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m128i 
{ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu16_epi32&expand=1553) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxwd))] pub unsafe fn _mm512_cvtepu16_epi32(a: __m256i) -> __m512i { let a = a.as_u16x16(); @@ -11086,6 +11806,7 @@ pub unsafe fn _mm512_cvtepu16_epi32(a: __m256i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu16_epi32&expand=1554) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxwd))] pub unsafe fn _mm512_mask_cvtepu16_epi32(src: __m512i, k: __mmask16, a: __m256i) -> __m512i { let convert = _mm512_cvtepu16_epi32(a).as_i32x16(); @@ -11097,6 +11818,7 @@ pub unsafe fn _mm512_mask_cvtepu16_epi32(src: __m512i, k: __mmask16, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepu16_epi32&expand=1555) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxwd))] pub unsafe fn _mm512_maskz_cvtepu16_epi32(k: __mmask16, a: __m256i) -> __m512i { let convert = _mm512_cvtepu16_epi32(a).as_i32x16(); @@ -11109,6 +11831,7 @@ pub unsafe fn _mm512_maskz_cvtepu16_epi32(k: __mmask16, a: __m256i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepu16_epi32&expand=1551) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxwd))] pub unsafe fn _mm256_mask_cvtepu16_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { let convert = 
_mm256_cvtepu16_epi32(a).as_i32x8(); @@ -11120,6 +11843,7 @@ pub unsafe fn _mm256_mask_cvtepu16_epi32(src: __m256i, k: __mmask8, a: __m128i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepu16_epi32&expand=1552) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxwd))] pub unsafe fn _mm256_maskz_cvtepu16_epi32(k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepu16_epi32(a).as_i32x8(); @@ -11132,6 +11856,7 @@ pub unsafe fn _mm256_maskz_cvtepu16_epi32(k: __mmask8, a: __m128i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepu16_epi32&expand=1548) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxwd))] pub unsafe fn _mm_mask_cvtepu16_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepu16_epi32(a).as_i32x4(); @@ -11143,6 +11868,7 @@ pub unsafe fn _mm_mask_cvtepu16_epi32(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepu16_epi32&expand=1549) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxwd))] pub unsafe fn _mm_maskz_cvtepu16_epi32(k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepu16_epi32(a).as_i32x4(); @@ -11155,6 +11881,7 @@ pub unsafe fn _mm_maskz_cvtepu16_epi32(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu16_epi64&expand=1562) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxwq))] pub unsafe fn _mm512_cvtepu16_epi64(a: __m128i) -> __m512i { let a = a.as_u16x8(); @@ -11166,6 +11893,7 @@ pub unsafe fn _mm512_cvtepu16_epi64(a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu16_epi64&expand=1563) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxwq))] pub unsafe fn _mm512_mask_cvtepu16_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { let convert = _mm512_cvtepu16_epi64(a).as_i64x8(); @@ -11177,6 +11905,7 @@ pub unsafe fn _mm512_mask_cvtepu16_epi64(src: __m512i, k: __mmask8, a: __m128i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepu16_epi64&expand=1564) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxwq))] pub unsafe fn _mm512_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m512i { let convert = _mm512_cvtepu16_epi64(a).as_i64x8(); @@ -11189,6 +11918,7 @@ pub unsafe fn _mm512_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepu16_epi64&expand=1560) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxwq))] pub unsafe fn _mm256_mask_cvtepu16_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepu16_epi64(a).as_i64x4(); @@ -11200,6 +11930,7 @@ pub unsafe fn _mm256_mask_cvtepu16_epi64(src: __m256i, k: __mmask8, a: __m128i) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepu16_epi64&expand=1561) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxwq))] pub unsafe fn _mm256_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepu16_epi64(a).as_i64x4(); @@ -11212,6 +11943,7 @@ pub unsafe fn _mm256_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepu16_epi64&expand=1557) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxwq))] pub unsafe fn _mm_mask_cvtepu16_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepu16_epi64(a).as_i64x2(); @@ -11223,6 +11955,7 @@ pub unsafe fn _mm_mask_cvtepu16_epi64(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepu16_epi64&expand=1558) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxwq))] pub unsafe fn _mm_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepu16_epi64(a).as_i64x2(); @@ -11235,6 +11968,7 @@ pub unsafe fn _mm_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi32_epi64&expand=1428) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxdq))] pub unsafe fn _mm512_cvtepi32_epi64(a: __m256i) -> __m512i { let a = a.as_i32x8(); @@ 
-11246,6 +11980,7 @@ pub unsafe fn _mm512_cvtepi32_epi64(a: __m256i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi32_epi64&expand=1429) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxdq))] pub unsafe fn _mm512_mask_cvtepi32_epi64(src: __m512i, k: __mmask8, a: __m256i) -> __m512i { let convert = _mm512_cvtepi32_epi64(a).as_i64x8(); @@ -11257,6 +11992,7 @@ pub unsafe fn _mm512_mask_cvtepi32_epi64(src: __m512i, k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi32_epi64&expand=1430) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxdq))] pub unsafe fn _mm512_maskz_cvtepi32_epi64(k: __mmask8, a: __m256i) -> __m512i { let convert = _mm512_cvtepi32_epi64(a).as_i64x8(); @@ -11269,6 +12005,7 @@ pub unsafe fn _mm512_maskz_cvtepi32_epi64(k: __mmask8, a: __m256i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi32_epi64&expand=1426) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxdq))] pub unsafe fn _mm256_mask_cvtepi32_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepi32_epi64(a).as_i64x4(); @@ -11280,6 +12017,7 @@ pub unsafe fn _mm256_mask_cvtepi32_epi64(src: __m256i, k: __mmask8, a: __m128i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi32_epi64&expand=1427) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vpmovsxdq))] pub unsafe fn _mm256_maskz_cvtepi32_epi64(k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepi32_epi64(a).as_i64x4(); @@ -11292,6 +12030,7 @@ pub unsafe fn _mm256_maskz_cvtepi32_epi64(k: __mmask8, a: __m128i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi32_epi64&expand=1423) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxdq))] pub unsafe fn _mm_mask_cvtepi32_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepi32_epi64(a).as_i64x2(); @@ -11303,6 +12042,7 @@ pub unsafe fn _mm_mask_cvtepi32_epi64(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi32_epi64&expand=1424) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsxdq))] pub unsafe fn _mm_maskz_cvtepi32_epi64(k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepi32_epi64(a).as_i64x2(); @@ -11315,6 +12055,7 @@ pub unsafe fn _mm_maskz_cvtepi32_epi64(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu32_epi64&expand=1571) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxdq))] pub unsafe fn _mm512_cvtepu32_epi64(a: __m256i) -> __m512i { let a = a.as_u32x8(); @@ -11326,6 +12067,7 @@ pub unsafe fn _mm512_cvtepu32_epi64(a: __m256i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu32_epi64&expand=1572) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxdq))] pub unsafe fn _mm512_mask_cvtepu32_epi64(src: __m512i, k: __mmask8, a: __m256i) -> __m512i { let convert = _mm512_cvtepu32_epi64(a).as_i64x8(); @@ -11337,6 +12079,7 @@ pub unsafe fn _mm512_mask_cvtepu32_epi64(src: __m512i, k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepu32_epi64&expand=1573) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxdq))] pub unsafe fn _mm512_maskz_cvtepu32_epi64(k: __mmask8, a: __m256i) -> __m512i { let convert = _mm512_cvtepu32_epi64(a).as_i64x8(); @@ -11349,6 +12092,7 @@ pub unsafe fn _mm512_maskz_cvtepu32_epi64(k: __mmask8, a: __m256i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepu32_epi64&expand=1569) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxdq))] pub unsafe fn _mm256_mask_cvtepu32_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepu32_epi64(a).as_i64x4(); @@ -11360,6 +12104,7 @@ pub unsafe fn _mm256_mask_cvtepu32_epi64(src: __m256i, k: __mmask8, a: __m128i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepu32_epi64&expand=1570) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxdq))] pub unsafe fn _mm256_maskz_cvtepu32_epi64(k: __mmask8, a: __m128i) -> __m256i { let convert = _mm256_cvtepu32_epi64(a).as_i64x4(); @@ -11372,6 +12117,7 @@ pub unsafe fn _mm256_maskz_cvtepu32_epi64(k: 
__mmask8, a: __m128i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepu32_epi64&expand=1566) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxdq))] pub unsafe fn _mm_mask_cvtepu32_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepu32_epi64(a).as_i64x2(); @@ -11383,6 +12129,7 @@ pub unsafe fn _mm_mask_cvtepu32_epi64(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepu32_epi64&expand=1567) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovzxdq))] pub unsafe fn _mm_maskz_cvtepu32_epi64(k: __mmask8, a: __m128i) -> __m128i { let convert = _mm_cvtepu32_epi64(a).as_i64x2(); @@ -11395,6 +12142,7 @@ pub unsafe fn _mm_maskz_cvtepu32_epi64(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi32_ps&expand=1455) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2ps))] pub unsafe fn _mm512_cvtepi32_ps(a: __m512i) -> __m512 { let a = a.as_i32x16(); @@ -11406,6 +12154,7 @@ pub unsafe fn _mm512_cvtepi32_ps(a: __m512i) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi32_ps&expand=1456) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2ps))] pub unsafe fn _mm512_mask_cvtepi32_ps(src: __m512, k: __mmask16, a: __m512i) -> __m512 { let convert = 
_mm512_cvtepi32_ps(a).as_f32x16(); @@ -11417,6 +12166,7 @@ pub unsafe fn _mm512_mask_cvtepi32_ps(src: __m512, k: __mmask16, a: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi32_ps&expand=1457) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2ps))] pub unsafe fn _mm512_maskz_cvtepi32_ps(k: __mmask16, a: __m512i) -> __m512 { let convert = _mm512_cvtepi32_ps(a).as_f32x16(); @@ -11429,6 +12179,7 @@ pub unsafe fn _mm512_maskz_cvtepi32_ps(k: __mmask16, a: __m512i) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi32_ps&expand=1453) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2ps))] pub unsafe fn _mm256_mask_cvtepi32_ps(src: __m256, k: __mmask8, a: __m256i) -> __m256 { let convert = _mm256_cvtepi32_ps(a).as_f32x8(); @@ -11440,6 +12191,7 @@ pub unsafe fn _mm256_mask_cvtepi32_ps(src: __m256, k: __mmask8, a: __m256i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi32_ps&expand=1454) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2ps))] pub unsafe fn _mm256_maskz_cvtepi32_ps(k: __mmask8, a: __m256i) -> __m256 { let convert = _mm256_cvtepi32_ps(a).as_f32x8(); @@ -11452,6 +12204,7 @@ pub unsafe fn _mm256_maskz_cvtepi32_ps(k: __mmask8, a: __m256i) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi32_ps&expand=1450) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = 
"111137")] #[cfg_attr(test, assert_instr(vcvtdq2ps))] pub unsafe fn _mm_mask_cvtepi32_ps(src: __m128, k: __mmask8, a: __m128i) -> __m128 { let convert = _mm_cvtepi32_ps(a).as_f32x4(); @@ -11463,6 +12216,7 @@ pub unsafe fn _mm_mask_cvtepi32_ps(src: __m128, k: __mmask8, a: __m128i) -> __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi32_ps&expand=1451) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2ps))] pub unsafe fn _mm_maskz_cvtepi32_ps(k: __mmask8, a: __m128i) -> __m128 { let convert = _mm_cvtepi32_ps(a).as_f32x4(); @@ -11475,6 +12229,7 @@ pub unsafe fn _mm_maskz_cvtepi32_ps(k: __mmask8, a: __m128i) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi32_pd&expand=1446) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] pub unsafe fn _mm512_cvtepi32_pd(a: __m256i) -> __m512d { let a = a.as_i32x8(); @@ -11486,6 +12241,7 @@ pub unsafe fn _mm512_cvtepi32_pd(a: __m256i) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi32_pd&expand=1447) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] pub unsafe fn _mm512_mask_cvtepi32_pd(src: __m512d, k: __mmask8, a: __m256i) -> __m512d { let convert = _mm512_cvtepi32_pd(a).as_f64x8(); @@ -11497,6 +12253,7 @@ pub unsafe fn _mm512_mask_cvtepi32_pd(src: __m512d, k: __mmask8, a: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi32_pd&expand=1448) #[inline] #[target_feature(enable = "avx512f")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] pub unsafe fn _mm512_maskz_cvtepi32_pd(k: __mmask8, a: __m256i) -> __m512d { let convert = _mm512_cvtepi32_pd(a).as_f64x8(); @@ -11509,6 +12266,7 @@ pub unsafe fn _mm512_maskz_cvtepi32_pd(k: __mmask8, a: __m256i) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi32_pd&expand=1444) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] pub unsafe fn _mm256_mask_cvtepi32_pd(src: __m256d, k: __mmask8, a: __m128i) -> __m256d { let convert = _mm256_cvtepi32_pd(a).as_f64x4(); @@ -11520,6 +12278,7 @@ pub unsafe fn _mm256_mask_cvtepi32_pd(src: __m256d, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi32_pd&expand=1445) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] pub unsafe fn _mm256_maskz_cvtepi32_pd(k: __mmask8, a: __m128i) -> __m256d { let convert = _mm256_cvtepi32_pd(a).as_f64x4(); @@ -11532,6 +12291,7 @@ pub unsafe fn _mm256_maskz_cvtepi32_pd(k: __mmask8, a: __m128i) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi32_pd&expand=1441) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] pub unsafe fn _mm_mask_cvtepi32_pd(src: __m128d, k: __mmask8, a: __m128i) -> __m128d { let convert = _mm_cvtepi32_pd(a).as_f64x2(); @@ -11543,6 +12303,7 @@ pub unsafe fn _mm_mask_cvtepi32_pd(src: __m128d, k: __mmask8, a: __m128i) -> __m /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi32_pd&expand=1442) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] pub unsafe fn _mm_maskz_cvtepi32_pd(k: __mmask8, a: __m128i) -> __m128d { let convert = _mm_cvtepi32_pd(a).as_f64x2(); @@ -11555,6 +12316,7 @@ pub unsafe fn _mm_maskz_cvtepi32_pd(k: __mmask8, a: __m128i) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu32_ps&expand=1583) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2ps))] pub unsafe fn _mm512_cvtepu32_ps(a: __m512i) -> __m512 { let a = a.as_u32x16(); @@ -11566,6 +12328,7 @@ pub unsafe fn _mm512_cvtepu32_ps(a: __m512i) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu32_ps&expand=1584) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2ps))] pub unsafe fn _mm512_mask_cvtepu32_ps(src: __m512, k: __mmask16, a: __m512i) -> __m512 { let convert = _mm512_cvtepu32_ps(a).as_f32x16(); @@ -11577,6 +12340,7 @@ pub unsafe fn _mm512_mask_cvtepu32_ps(src: __m512, k: __mmask16, a: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepu32_ps&expand=1585) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2ps))] pub unsafe fn _mm512_maskz_cvtepu32_ps(k: __mmask16, a: __m512i) -> __m512 { let convert = _mm512_cvtepu32_ps(a).as_f32x16(); @@ -11589,6 +12353,7 @@ pub unsafe fn _mm512_maskz_cvtepu32_ps(k: 
__mmask16, a: __m512i) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu32_pd&expand=1580) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] pub unsafe fn _mm512_cvtepu32_pd(a: __m256i) -> __m512d { let a = a.as_u32x8(); @@ -11600,6 +12365,7 @@ pub unsafe fn _mm512_cvtepu32_pd(a: __m256i) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu32_pd&expand=1581) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] pub unsafe fn _mm512_mask_cvtepu32_pd(src: __m512d, k: __mmask8, a: __m256i) -> __m512d { let convert = _mm512_cvtepu32_pd(a).as_f64x8(); @@ -11611,6 +12377,7 @@ pub unsafe fn _mm512_mask_cvtepu32_pd(src: __m512d, k: __mmask8, a: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepu32_pd&expand=1582) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] pub unsafe fn _mm512_maskz_cvtepu32_pd(k: __mmask8, a: __m256i) -> __m512d { let convert = _mm512_cvtepu32_pd(a).as_f64x8(); @@ -11623,6 +12390,7 @@ pub unsafe fn _mm512_maskz_cvtepu32_pd(k: __mmask8, a: __m256i) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepu32_pd&expand=1577) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] pub unsafe fn _mm256_cvtepu32_pd(a: __m128i) -> __m256d { let a = a.as_u32x4(); @@ -11634,6 +12402,7 @@ pub unsafe fn _mm256_cvtepu32_pd(a: 
__m128i) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepu32_pd&expand=1578) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] pub unsafe fn _mm256_mask_cvtepu32_pd(src: __m256d, k: __mmask8, a: __m128i) -> __m256d { let convert = _mm256_cvtepu32_pd(a).as_f64x4(); @@ -11645,6 +12414,7 @@ pub unsafe fn _mm256_mask_cvtepu32_pd(src: __m256d, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepu32_pd&expand=1579) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] pub unsafe fn _mm256_maskz_cvtepu32_pd(k: __mmask8, a: __m128i) -> __m256d { let convert = _mm256_cvtepu32_pd(a).as_f64x4(); @@ -11657,6 +12427,7 @@ pub unsafe fn _mm256_maskz_cvtepu32_pd(k: __mmask8, a: __m128i) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu32_pd&expand=1574) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] pub unsafe fn _mm_cvtepu32_pd(a: __m128i) -> __m128d { let a = a.as_u32x4(); @@ -11669,6 +12440,7 @@ pub unsafe fn _mm_cvtepu32_pd(a: __m128i) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepu32_pd&expand=1575) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] pub unsafe fn _mm_mask_cvtepu32_pd(src: __m128d, k: __mmask8, a: __m128i) -> __m128d { let convert = _mm_cvtepu32_pd(a).as_f64x2(); @@ 
-11680,6 +12452,7 @@ pub unsafe fn _mm_mask_cvtepu32_pd(src: __m128d, k: __mmask8, a: __m128i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepu32_pd&expand=1576) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] pub unsafe fn _mm_maskz_cvtepu32_pd(k: __mmask8, a: __m128i) -> __m128d { let convert = _mm_cvtepu32_pd(a).as_f64x2(); @@ -11692,6 +12465,7 @@ pub unsafe fn _mm_maskz_cvtepu32_pd(k: __mmask8, a: __m128i) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi32lo_pd&expand=1464) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] pub unsafe fn _mm512_cvtepi32lo_pd(v2: __m512i) -> __m512d { let v2 = v2.as_i32x16(); @@ -11704,6 +12478,7 @@ pub unsafe fn _mm512_cvtepi32lo_pd(v2: __m512i) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi32lo_pd&expand=1465) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2pd))] pub unsafe fn _mm512_mask_cvtepi32lo_pd(src: __m512d, k: __mmask8, v2: __m512i) -> __m512d { let convert = _mm512_cvtepi32lo_pd(v2).as_f64x8(); @@ -11715,6 +12490,7 @@ pub unsafe fn _mm512_mask_cvtepi32lo_pd(src: __m512d, k: __mmask8, v2: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu32lo_pd&expand=1586) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] pub unsafe fn _mm512_cvtepu32lo_pd(v2: __m512i) -> __m512d { let v2 
= v2.as_u32x16(); @@ -11727,6 +12503,7 @@ pub unsafe fn _mm512_cvtepu32lo_pd(v2: __m512i) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu32lo_pd&expand=1587) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2pd))] pub unsafe fn _mm512_mask_cvtepu32lo_pd(src: __m512d, k: __mmask8, v2: __m512i) -> __m512d { let convert = _mm512_cvtepu32lo_pd(v2).as_f64x8(); @@ -11738,6 +12515,7 @@ pub unsafe fn _mm512_mask_cvtepu32lo_pd(src: __m512d, k: __mmask8, v2: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi32_epi16&expand=1419) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdw))] pub unsafe fn _mm512_cvtepi32_epi16(a: __m512i) -> __m256i { let a = a.as_i32x16(); @@ -11749,6 +12527,7 @@ pub unsafe fn _mm512_cvtepi32_epi16(a: __m512i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi32_epi16&expand=1420) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdw))] pub unsafe fn _mm512_mask_cvtepi32_epi16(src: __m256i, k: __mmask16, a: __m512i) -> __m256i { let convert = _mm512_cvtepi32_epi16(a).as_i16x16(); @@ -11760,6 +12539,7 @@ pub unsafe fn _mm512_mask_cvtepi32_epi16(src: __m256i, k: __mmask16, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi32_epi16&expand=1421) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdw))] pub unsafe fn 
_mm512_maskz_cvtepi32_epi16(k: __mmask16, a: __m512i) -> __m256i { let convert = _mm512_cvtepi32_epi16(a).as_i16x16(); @@ -11772,6 +12552,7 @@ pub unsafe fn _mm512_maskz_cvtepi32_epi16(k: __mmask16, a: __m512i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi32_epi16&expand=1416) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdw))] pub unsafe fn _mm256_cvtepi32_epi16(a: __m256i) -> __m128i { let a = a.as_i32x8(); @@ -11783,6 +12564,7 @@ pub unsafe fn _mm256_cvtepi32_epi16(a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi32_epi16&expand=1417) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdw))] pub unsafe fn _mm256_mask_cvtepi32_epi16(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { let convert = _mm256_cvtepi32_epi16(a).as_i16x8(); @@ -11794,6 +12576,7 @@ pub unsafe fn _mm256_mask_cvtepi32_epi16(src: __m128i, k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi32_epi16&expand=1418) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdw))] pub unsafe fn _mm256_maskz_cvtepi32_epi16(k: __mmask8, a: __m256i) -> __m128i { let convert = _mm256_cvtepi32_epi16(a).as_i16x8(); @@ -11806,6 +12589,7 @@ pub unsafe fn _mm256_maskz_cvtepi32_epi16(k: __mmask8, a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_epi16&expand=1413) #[inline] #[target_feature(enable = "avx512f,avx512vl")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdw))] pub unsafe fn _mm_cvtepi32_epi16(a: __m128i) -> __m128i { transmute(vpmovdw128( @@ -11820,6 +12604,7 @@ pub unsafe fn _mm_cvtepi32_epi16(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi32_epi16&expand=1414) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdw))] pub unsafe fn _mm_mask_cvtepi32_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovdw128(a.as_i32x4(), src.as_i16x8(), k)) @@ -11830,6 +12615,7 @@ pub unsafe fn _mm_mask_cvtepi32_epi16(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi32_epi16&expand=1415) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdw))] pub unsafe fn _mm_maskz_cvtepi32_epi16(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovdw128(a.as_i32x4(), _mm_setzero_si128().as_i16x8(), k)) @@ -11840,6 +12626,7 @@ pub unsafe fn _mm_maskz_cvtepi32_epi16(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi32_epi8&expand=1437) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdb))] pub unsafe fn _mm512_cvtepi32_epi8(a: __m512i) -> __m128i { let a = a.as_i32x16(); @@ -11851,6 +12638,7 @@ pub unsafe fn _mm512_cvtepi32_epi8(a: __m512i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi32_epi8&expand=1438) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdb))] pub unsafe fn _mm512_mask_cvtepi32_epi8(src: __m128i, k: __mmask16, a: __m512i) -> __m128i { let convert = _mm512_cvtepi32_epi8(a).as_i8x16(); @@ -11862,6 +12650,7 @@ pub unsafe fn _mm512_mask_cvtepi32_epi8(src: __m128i, k: __mmask16, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi32_epi8&expand=1439) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdb))] pub unsafe fn _mm512_maskz_cvtepi32_epi8(k: __mmask16, a: __m512i) -> __m128i { let convert = _mm512_cvtepi32_epi8(a).as_i8x16(); @@ -11874,6 +12663,7 @@ pub unsafe fn _mm512_maskz_cvtepi32_epi8(k: __mmask16, a: __m512i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi32_epi8&expand=1434) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdb))] pub unsafe fn _mm256_cvtepi32_epi8(a: __m256i) -> __m128i { transmute(vpmovdb256( @@ -11888,6 +12678,7 @@ pub unsafe fn _mm256_cvtepi32_epi8(a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi32_epi8&expand=1435) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdb))] pub unsafe fn _mm256_mask_cvtepi32_epi8(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovdb256(a.as_i32x8(), src.as_i8x16(), k)) @@ -11898,6 +12689,7 @@ pub unsafe fn _mm256_mask_cvtepi32_epi8(src: __m128i, k: __mmask8, a: __m256i) - /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi32_epi8&expand=1436) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdb))] pub unsafe fn _mm256_maskz_cvtepi32_epi8(k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovdb256(a.as_i32x8(), _mm_setzero_si128().as_i8x16(), k)) @@ -11908,6 +12700,7 @@ pub unsafe fn _mm256_maskz_cvtepi32_epi8(k: __mmask8, a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_epi8&expand=1431) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdb))] pub unsafe fn _mm_cvtepi32_epi8(a: __m128i) -> __m128i { transmute(vpmovdb128( @@ -11922,6 +12715,7 @@ pub unsafe fn _mm_cvtepi32_epi8(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi32_epi8&expand=1432) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdb))] pub unsafe fn _mm_mask_cvtepi32_epi8(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovdb128(a.as_i32x4(), src.as_i8x16(), k)) @@ -11932,6 +12726,7 @@ pub unsafe fn _mm_mask_cvtepi32_epi8(src: __m128i, k: __mmask8, a: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi32_epi8&expand=1433) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdb))] pub unsafe fn _mm_maskz_cvtepi32_epi8(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovdb128(a.as_i32x4(), 
_mm_setzero_si128().as_i8x16(), k)) @@ -11942,6 +12737,7 @@ pub unsafe fn _mm_maskz_cvtepi32_epi8(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi64_epi32&expand=1481) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqd))] pub unsafe fn _mm512_cvtepi64_epi32(a: __m512i) -> __m256i { let a = a.as_i64x8(); @@ -11953,6 +12749,7 @@ pub unsafe fn _mm512_cvtepi64_epi32(a: __m512i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi64_epi32&expand=1482) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqd))] pub unsafe fn _mm512_mask_cvtepi64_epi32(src: __m256i, k: __mmask8, a: __m512i) -> __m256i { let convert = _mm512_cvtepi64_epi32(a).as_i32x8(); @@ -11964,6 +12761,7 @@ pub unsafe fn _mm512_mask_cvtepi64_epi32(src: __m256i, k: __mmask8, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi64_epi32&expand=1483) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqd))] pub unsafe fn _mm512_maskz_cvtepi64_epi32(k: __mmask8, a: __m512i) -> __m256i { let convert = _mm512_cvtepi64_epi32(a).as_i32x8(); @@ -11976,6 +12774,7 @@ pub unsafe fn _mm512_maskz_cvtepi64_epi32(k: __mmask8, a: __m512i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi64_epi32&expand=1478) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqd))] pub unsafe fn 
_mm256_cvtepi64_epi32(a: __m256i) -> __m128i { let a = a.as_i64x4(); @@ -11987,6 +12786,7 @@ pub unsafe fn _mm256_cvtepi64_epi32(a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi64_epi32&expand=1479) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqd))] pub unsafe fn _mm256_mask_cvtepi64_epi32(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { let convert = _mm256_cvtepi64_epi32(a).as_i32x4(); @@ -11998,6 +12798,7 @@ pub unsafe fn _mm256_mask_cvtepi64_epi32(src: __m128i, k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi64_epi32&expand=1480) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqd))] pub unsafe fn _mm256_maskz_cvtepi64_epi32(k: __mmask8, a: __m256i) -> __m128i { let convert = _mm256_cvtepi64_epi32(a).as_i32x4(); @@ -12010,6 +12811,7 @@ pub unsafe fn _mm256_maskz_cvtepi64_epi32(k: __mmask8, a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi64_epi32&expand=1475) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqd))] pub unsafe fn _mm_cvtepi64_epi32(a: __m128i) -> __m128i { transmute(vpmovqd128( @@ -12024,6 +12826,7 @@ pub unsafe fn _mm_cvtepi64_epi32(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi64_epi32&expand=1476) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpmovqd))] pub unsafe fn _mm_mask_cvtepi64_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovqd128(a.as_i64x2(), src.as_i32x4(), k)) @@ -12034,6 +12837,7 @@ pub unsafe fn _mm_mask_cvtepi64_epi32(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi64_epi32&expand=1477) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqd))] pub unsafe fn _mm_maskz_cvtepi64_epi32(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovqd128(a.as_i64x2(), _mm_setzero_si128().as_i32x4(), k)) @@ -12044,6 +12848,7 @@ pub unsafe fn _mm_maskz_cvtepi64_epi32(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi64_epi16&expand=1472) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqw))] pub unsafe fn _mm512_cvtepi64_epi16(a: __m512i) -> __m128i { let a = a.as_i64x8(); @@ -12055,6 +12860,7 @@ pub unsafe fn _mm512_cvtepi64_epi16(a: __m512i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi64_epi16&expand=1473) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqw))] pub unsafe fn _mm512_mask_cvtepi64_epi16(src: __m128i, k: __mmask8, a: __m512i) -> __m128i { let convert = _mm512_cvtepi64_epi16(a).as_i16x8(); @@ -12066,6 +12872,7 @@ pub unsafe fn _mm512_mask_cvtepi64_epi16(src: __m128i, k: __mmask8, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi64_epi16&expand=1474) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqw))] pub unsafe fn _mm512_maskz_cvtepi64_epi16(k: __mmask8, a: __m512i) -> __m128i { let convert = _mm512_cvtepi64_epi16(a).as_i16x8(); @@ -12078,6 +12885,7 @@ pub unsafe fn _mm512_maskz_cvtepi64_epi16(k: __mmask8, a: __m512i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi64_epi16&expand=1469) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqw))] pub unsafe fn _mm256_cvtepi64_epi16(a: __m256i) -> __m128i { transmute(vpmovqw256( @@ -12092,6 +12900,7 @@ pub unsafe fn _mm256_cvtepi64_epi16(a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi64_epi16&expand=1470) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqw))] pub unsafe fn _mm256_mask_cvtepi64_epi16(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovqw256(a.as_i64x4(), src.as_i16x8(), k)) @@ -12102,6 +12911,7 @@ pub unsafe fn _mm256_mask_cvtepi64_epi16(src: __m128i, k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi64_epi16&expand=1471) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqw))] pub unsafe fn _mm256_maskz_cvtepi64_epi16(k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovqw256(a.as_i64x4(), _mm_setzero_si128().as_i16x8(), k)) @@ -12112,6 +12922,7 @@ pub unsafe fn _mm256_maskz_cvtepi64_epi16(k: __mmask8, a: __m256i) -> __m128i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi64_epi16&expand=1466) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqw))] pub unsafe fn _mm_cvtepi64_epi16(a: __m128i) -> __m128i { transmute(vpmovqw128( @@ -12126,6 +12937,7 @@ pub unsafe fn _mm_cvtepi64_epi16(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi64_epi16&expand=1467) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqw))] pub unsafe fn _mm_mask_cvtepi64_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovqw128(a.as_i64x2(), src.as_i16x8(), k)) @@ -12136,6 +12948,7 @@ pub unsafe fn _mm_mask_cvtepi64_epi16(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi64_epi16&expand=1468) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqw))] pub unsafe fn _mm_maskz_cvtepi64_epi16(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovqw128(a.as_i64x2(), _mm_setzero_si128().as_i16x8(), k)) @@ -12146,6 +12959,7 @@ pub unsafe fn _mm_maskz_cvtepi64_epi16(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi64_epi8&expand=1490) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqb))] pub unsafe fn _mm512_cvtepi64_epi8(a: __m512i) -> __m128i { transmute(vpmovqb( @@ -12160,6 +12974,7 @@ pub unsafe fn _mm512_cvtepi64_epi8(a: __m512i) -> 
__m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi64_epi8&expand=1491) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqb))] pub unsafe fn _mm512_mask_cvtepi64_epi8(src: __m128i, k: __mmask8, a: __m512i) -> __m128i { transmute(vpmovqb(a.as_i64x8(), src.as_i8x16(), k)) @@ -12170,6 +12985,7 @@ pub unsafe fn _mm512_mask_cvtepi64_epi8(src: __m128i, k: __mmask8, a: __m512i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi64_epi8&expand=1492) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqb))] pub unsafe fn _mm512_maskz_cvtepi64_epi8(k: __mmask8, a: __m512i) -> __m128i { transmute(vpmovqb(a.as_i64x8(), _mm_setzero_si128().as_i8x16(), k)) @@ -12180,6 +12996,7 @@ pub unsafe fn _mm512_maskz_cvtepi64_epi8(k: __mmask8, a: __m512i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi64_epi8&expand=1487) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqb))] pub unsafe fn _mm256_cvtepi64_epi8(a: __m256i) -> __m128i { transmute(vpmovqb256( @@ -12194,6 +13011,7 @@ pub unsafe fn _mm256_cvtepi64_epi8(a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi64_epi8&expand=1488) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqb))] pub unsafe fn _mm256_mask_cvtepi64_epi8(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { 
transmute(vpmovqb256(a.as_i64x4(), src.as_i8x16(), k)) @@ -12204,6 +13022,7 @@ pub unsafe fn _mm256_mask_cvtepi64_epi8(src: __m128i, k: __mmask8, a: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi64_epi8&expand=1489) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqb))] pub unsafe fn _mm256_maskz_cvtepi64_epi8(k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovqb256(a.as_i64x4(), _mm_setzero_si128().as_i8x16(), k)) @@ -12214,6 +13033,7 @@ pub unsafe fn _mm256_maskz_cvtepi64_epi8(k: __mmask8, a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi64_epi8&expand=1484) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqb))] pub unsafe fn _mm_cvtepi64_epi8(a: __m128i) -> __m128i { transmute(vpmovqb128( @@ -12228,6 +13048,7 @@ pub unsafe fn _mm_cvtepi64_epi8(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi64_epi8&expand=1485) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqb))] pub unsafe fn _mm_mask_cvtepi64_epi8(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovqb128(a.as_i64x2(), src.as_i8x16(), k)) @@ -12238,6 +13059,7 @@ pub unsafe fn _mm_mask_cvtepi64_epi8(src: __m128i, k: __mmask8, a: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi64_epi8&expand=1486) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vpmovqb))] pub unsafe fn _mm_maskz_cvtepi64_epi8(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovqb128(a.as_i64x2(), _mm_setzero_si128().as_i8x16(), k)) @@ -12248,6 +13070,7 @@ pub unsafe fn _mm_maskz_cvtepi64_epi8(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtsepi32_epi16&expand=1819) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdw))] pub unsafe fn _mm512_cvtsepi32_epi16(a: __m512i) -> __m256i { transmute(vpmovsdw( @@ -12262,6 +13085,7 @@ pub unsafe fn _mm512_cvtsepi32_epi16(a: __m512i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi32_epi16&expand=1820) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdw))] pub unsafe fn _mm512_mask_cvtsepi32_epi16(src: __m256i, k: __mmask16, a: __m512i) -> __m256i { transmute(vpmovsdw(a.as_i32x16(), src.as_i16x16(), k)) @@ -12272,6 +13096,7 @@ pub unsafe fn _mm512_mask_cvtsepi32_epi16(src: __m256i, k: __mmask16, a: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtsepi32_epi16&expand=1819) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdw))] pub unsafe fn _mm512_maskz_cvtsepi32_epi16(k: __mmask16, a: __m512i) -> __m256i { transmute(vpmovsdw( @@ -12286,6 +13111,7 @@ pub unsafe fn _mm512_maskz_cvtsepi32_epi16(k: __mmask16, a: __m512i) -> __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtsepi32_epi16&expand=1816) #[inline] #[target_feature(enable = 
"avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdw))] pub unsafe fn _mm256_cvtsepi32_epi16(a: __m256i) -> __m128i { transmute(vpmovsdw256( @@ -12300,6 +13126,7 @@ pub unsafe fn _mm256_cvtsepi32_epi16(a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi32_epi16&expand=1817) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdw))] pub unsafe fn _mm256_mask_cvtsepi32_epi16(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovsdw256(a.as_i32x8(), src.as_i16x8(), k)) @@ -12310,6 +13137,7 @@ pub unsafe fn _mm256_mask_cvtsepi32_epi16(src: __m128i, k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtsepi32_epi16&expand=1818) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdw))] pub unsafe fn _mm256_maskz_cvtsepi32_epi16(k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovsdw256(a.as_i32x8(), _mm_setzero_si128().as_i16x8(), k)) @@ -12320,6 +13148,7 @@ pub unsafe fn _mm256_maskz_cvtsepi32_epi16(k: __mmask8, a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsepi32_epi16&expand=1813) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdw))] pub unsafe fn _mm_cvtsepi32_epi16(a: __m128i) -> __m128i { transmute(vpmovsdw128( @@ -12334,6 +13163,7 @@ pub unsafe fn _mm_cvtsepi32_epi16(a: __m128i) -> __m128i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi32_epi16&expand=1814) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdw))] pub unsafe fn _mm_mask_cvtsepi32_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovsdw128(a.as_i32x4(), src.as_i16x8(), k)) @@ -12344,6 +13174,7 @@ pub unsafe fn _mm_mask_cvtsepi32_epi16(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtsepi32_epi16&expand=1815) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdw))] pub unsafe fn _mm_maskz_cvtsepi32_epi16(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovsdw128(a.as_i32x4(), _mm_setzero_si128().as_i16x8(), k)) @@ -12354,6 +13185,7 @@ pub unsafe fn _mm_maskz_cvtsepi32_epi16(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtsepi32_epi8&expand=1828) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdb))] pub unsafe fn _mm512_cvtsepi32_epi8(a: __m512i) -> __m128i { transmute(vpmovsdb( @@ -12368,6 +13200,7 @@ pub unsafe fn _mm512_cvtsepi32_epi8(a: __m512i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi32_epi8&expand=1829) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdb))] pub unsafe fn _mm512_mask_cvtsepi32_epi8(src: __m128i, k: __mmask16, a: __m512i) -> __m128i { transmute(vpmovsdb(a.as_i32x16(), 
src.as_i8x16(), k)) @@ -12378,6 +13211,7 @@ pub unsafe fn _mm512_mask_cvtsepi32_epi8(src: __m128i, k: __mmask16, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtsepi32_epi8&expand=1830) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdb))] pub unsafe fn _mm512_maskz_cvtsepi32_epi8(k: __mmask16, a: __m512i) -> __m128i { transmute(vpmovsdb(a.as_i32x16(), _mm_setzero_si128().as_i8x16(), k)) @@ -12388,6 +13222,7 @@ pub unsafe fn _mm512_maskz_cvtsepi32_epi8(k: __mmask16, a: __m512i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtsepi32_epi8&expand=1825) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdb))] pub unsafe fn _mm256_cvtsepi32_epi8(a: __m256i) -> __m128i { transmute(vpmovsdb256( @@ -12402,6 +13237,7 @@ pub unsafe fn _mm256_cvtsepi32_epi8(a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi32_epi8&expand=1826) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdb))] pub unsafe fn _mm256_mask_cvtsepi32_epi8(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovsdb256(a.as_i32x8(), src.as_i8x16(), k)) @@ -12412,6 +13248,7 @@ pub unsafe fn _mm256_mask_cvtsepi32_epi8(src: __m128i, k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtsepi32_epi8&expand=1827) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpmovsdb))] pub unsafe fn _mm256_maskz_cvtsepi32_epi8(k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovsdb256(a.as_i32x8(), _mm_setzero_si128().as_i8x16(), k)) @@ -12422,6 +13259,7 @@ pub unsafe fn _mm256_maskz_cvtsepi32_epi8(k: __mmask8, a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsepi32_epi8&expand=1822) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdb))] pub unsafe fn _mm_cvtsepi32_epi8(a: __m128i) -> __m128i { transmute(vpmovsdb128( @@ -12436,6 +13274,7 @@ pub unsafe fn _mm_cvtsepi32_epi8(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi32_epi8&expand=1823) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdb))] pub unsafe fn _mm_mask_cvtsepi32_epi8(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovsdb128(a.as_i32x4(), src.as_i8x16(), k)) @@ -12446,6 +13285,7 @@ pub unsafe fn _mm_mask_cvtsepi32_epi8(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtsepi32_epi8&expand=1824) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdb))] pub unsafe fn _mm_maskz_cvtsepi32_epi8(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovsdb128(a.as_i32x4(), _mm_setzero_si128().as_i8x16(), k)) @@ -12456,6 +13296,7 @@ pub unsafe fn _mm_maskz_cvtsepi32_epi8(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtsepi64_epi32&expand=1852) 
#[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqd))] pub unsafe fn _mm512_cvtsepi64_epi32(a: __m512i) -> __m256i { transmute(vpmovsqd( @@ -12470,6 +13311,7 @@ pub unsafe fn _mm512_cvtsepi64_epi32(a: __m512i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi64_epi32&expand=1853) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqd))] pub unsafe fn _mm512_mask_cvtsepi64_epi32(src: __m256i, k: __mmask8, a: __m512i) -> __m256i { transmute(vpmovsqd(a.as_i64x8(), src.as_i32x8(), k)) @@ -12480,6 +13322,7 @@ pub unsafe fn _mm512_mask_cvtsepi64_epi32(src: __m256i, k: __mmask8, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtsepi64_epi32&expand=1854) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqd))] pub unsafe fn _mm512_maskz_cvtsepi64_epi32(k: __mmask8, a: __m512i) -> __m256i { transmute(vpmovsqd(a.as_i64x8(), _mm256_setzero_si256().as_i32x8(), k)) @@ -12490,6 +13333,7 @@ pub unsafe fn _mm512_maskz_cvtsepi64_epi32(k: __mmask8, a: __m512i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtsepi64_epi32&expand=1849) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqd))] pub unsafe fn _mm256_cvtsepi64_epi32(a: __m256i) -> __m128i { transmute(vpmovsqd256( @@ -12504,6 +13348,7 @@ pub unsafe fn _mm256_cvtsepi64_epi32(a: __m256i) -> __m128i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi64_epi32&expand=1850) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqd))] pub unsafe fn _mm256_mask_cvtsepi64_epi32(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovsqd256(a.as_i64x4(), src.as_i32x4(), k)) @@ -12514,6 +13359,7 @@ pub unsafe fn _mm256_mask_cvtsepi64_epi32(src: __m128i, k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtsepi64_epi32&expand=1851) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqd))] pub unsafe fn _mm256_maskz_cvtsepi64_epi32(k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovsqd256(a.as_i64x4(), _mm_setzero_si128().as_i32x4(), k)) @@ -12524,6 +13370,7 @@ pub unsafe fn _mm256_maskz_cvtsepi64_epi32(k: __mmask8, a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsepi64_epi32&expand=1846) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqd))] pub unsafe fn _mm_cvtsepi64_epi32(a: __m128i) -> __m128i { transmute(vpmovsqd128( @@ -12538,6 +13385,7 @@ pub unsafe fn _mm_cvtsepi64_epi32(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi64_epi32&expand=1847) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqd))] pub unsafe fn _mm_mask_cvtsepi64_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { 
transmute(vpmovsqd128(a.as_i64x2(), src.as_i32x4(), k)) @@ -12548,6 +13396,7 @@ pub unsafe fn _mm_mask_cvtsepi64_epi32(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtsepi64_epi32&expand=1848) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqd))] pub unsafe fn _mm_maskz_cvtsepi64_epi32(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovsqd128(a.as_i64x2(), _mm_setzero_si128().as_i32x4(), k)) @@ -12558,6 +13407,7 @@ pub unsafe fn _mm_maskz_cvtsepi64_epi32(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtsepi64_epi16&expand=1843) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqw))] pub unsafe fn _mm512_cvtsepi64_epi16(a: __m512i) -> __m128i { transmute(vpmovsqw( @@ -12572,6 +13422,7 @@ pub unsafe fn _mm512_cvtsepi64_epi16(a: __m512i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi64_epi16&expand=1844) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqw))] pub unsafe fn _mm512_mask_cvtsepi64_epi16(src: __m128i, k: __mmask8, a: __m512i) -> __m128i { transmute(vpmovsqw(a.as_i64x8(), src.as_i16x8(), k)) @@ -12582,6 +13433,7 @@ pub unsafe fn _mm512_mask_cvtsepi64_epi16(src: __m128i, k: __mmask8, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtsepi64_epi16&expand=1845) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vpmovsqw))] pub unsafe fn _mm512_maskz_cvtsepi64_epi16(k: __mmask8, a: __m512i) -> __m128i { transmute(vpmovsqw(a.as_i64x8(), _mm_setzero_si128().as_i16x8(), k)) @@ -12592,6 +13444,7 @@ pub unsafe fn _mm512_maskz_cvtsepi64_epi16(k: __mmask8, a: __m512i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtsepi64_epi16&expand=1840) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqw))] pub unsafe fn _mm256_cvtsepi64_epi16(a: __m256i) -> __m128i { transmute(vpmovsqw256( @@ -12606,6 +13459,7 @@ pub unsafe fn _mm256_cvtsepi64_epi16(a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi64_epi16&expand=1841) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqw))] pub unsafe fn _mm256_mask_cvtsepi64_epi16(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovsqw256(a.as_i64x4(), src.as_i16x8(), k)) @@ -12616,6 +13470,7 @@ pub unsafe fn _mm256_mask_cvtsepi64_epi16(src: __m128i, k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtsepi64_epi16&expand=1842) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqw))] pub unsafe fn _mm256_maskz_cvtsepi64_epi16(k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovsqw256(a.as_i64x4(), _mm_setzero_si128().as_i16x8(), k)) @@ -12626,6 +13481,7 @@ pub unsafe fn _mm256_maskz_cvtsepi64_epi16(k: __mmask8, a: __m256i) -> __m128i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsepi64_epi16&expand=1837) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqw))] pub unsafe fn _mm_cvtsepi64_epi16(a: __m128i) -> __m128i { transmute(vpmovsqw128( @@ -12640,6 +13496,7 @@ pub unsafe fn _mm_cvtsepi64_epi16(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi64_epi16&expand=1838) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqw))] pub unsafe fn _mm_mask_cvtsepi64_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovsqw128(a.as_i64x2(), src.as_i16x8(), k)) @@ -12650,6 +13507,7 @@ pub unsafe fn _mm_mask_cvtsepi64_epi16(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtsepi64_epi16&expand=1839) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqw))] pub unsafe fn _mm_maskz_cvtsepi64_epi16(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovsqw128(a.as_i64x2(), _mm_setzero_si128().as_i16x8(), k)) @@ -12660,6 +13518,7 @@ pub unsafe fn _mm_maskz_cvtsepi64_epi16(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtsepi64_epi8&expand=1861) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqb))] pub unsafe fn _mm512_cvtsepi64_epi8(a: __m512i) -> __m128i { transmute(vpmovsqb( @@ -12674,6 +13533,7 @@ pub unsafe fn 
_mm512_cvtsepi64_epi8(a: __m512i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi64_epi8&expand=1862) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqb))] pub unsafe fn _mm512_mask_cvtsepi64_epi8(src: __m128i, k: __mmask8, a: __m512i) -> __m128i { transmute(vpmovsqb(a.as_i64x8(), src.as_i8x16(), k)) @@ -12684,6 +13544,7 @@ pub unsafe fn _mm512_mask_cvtsepi64_epi8(src: __m128i, k: __mmask8, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtsepi64_epi8&expand=1863) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqb))] pub unsafe fn _mm512_maskz_cvtsepi64_epi8(k: __mmask8, a: __m512i) -> __m128i { transmute(vpmovsqb(a.as_i64x8(), _mm_setzero_si128().as_i8x16(), k)) @@ -12694,6 +13555,7 @@ pub unsafe fn _mm512_maskz_cvtsepi64_epi8(k: __mmask8, a: __m512i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtsepi64_epi8&expand=1858) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqb))] pub unsafe fn _mm256_cvtsepi64_epi8(a: __m256i) -> __m128i { transmute(vpmovsqb256( @@ -12708,6 +13570,7 @@ pub unsafe fn _mm256_cvtsepi64_epi8(a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi64_epi8&expand=1859) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqb))] pub unsafe fn _mm256_mask_cvtsepi64_epi8(src: __m128i, k: __mmask8, a: 
__m256i) -> __m128i { transmute(vpmovsqb256(a.as_i64x4(), src.as_i8x16(), k)) @@ -12718,6 +13581,7 @@ pub unsafe fn _mm256_mask_cvtsepi64_epi8(src: __m128i, k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtsepi64_epi8&expand=1860) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqb))] pub unsafe fn _mm256_maskz_cvtsepi64_epi8(k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovsqb256(a.as_i64x4(), _mm_setzero_si128().as_i8x16(), k)) @@ -12728,6 +13592,7 @@ pub unsafe fn _mm256_maskz_cvtsepi64_epi8(k: __mmask8, a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsepi64_epi8&expand=1855) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqb))] pub unsafe fn _mm_cvtsepi64_epi8(a: __m128i) -> __m128i { transmute(vpmovsqb128( @@ -12742,6 +13607,7 @@ pub unsafe fn _mm_cvtsepi64_epi8(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi64_epi8&expand=1856) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqb))] pub unsafe fn _mm_mask_cvtsepi64_epi8(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovsqb128(a.as_i64x2(), src.as_i8x16(), k)) @@ -12752,6 +13618,7 @@ pub unsafe fn _mm_mask_cvtsepi64_epi8(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtsepi64_epi8&expand=1857) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqb))] pub unsafe fn _mm_maskz_cvtsepi64_epi8(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovsqb128(a.as_i64x2(), _mm_setzero_si128().as_i8x16(), k)) @@ -12762,6 +13629,7 @@ pub unsafe fn _mm_maskz_cvtsepi64_epi8(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtusepi32_epi16&expand=2054) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdw))] pub unsafe fn _mm512_cvtusepi32_epi16(a: __m512i) -> __m256i { transmute(vpmovusdw( @@ -12776,6 +13644,7 @@ pub unsafe fn _mm512_cvtusepi32_epi16(a: __m512i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi32_epi16&expand=2055) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdw))] pub unsafe fn _mm512_mask_cvtusepi32_epi16(src: __m256i, k: __mmask16, a: __m512i) -> __m256i { transmute(vpmovusdw(a.as_u32x16(), src.as_u16x16(), k)) @@ -12786,6 +13655,7 @@ pub unsafe fn _mm512_mask_cvtusepi32_epi16(src: __m256i, k: __mmask16, a: __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtusepi32_epi16&expand=2056) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdw))] pub unsafe fn _mm512_maskz_cvtusepi32_epi16(k: __mmask16, a: __m512i) -> __m256i { transmute(vpmovusdw( @@ -12800,6 +13670,7 @@ pub unsafe fn _mm512_maskz_cvtusepi32_epi16(k: __mmask16, a: __m512i) -> __m256i /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtusepi32_epi16&expand=2051) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdw))] pub unsafe fn _mm256_cvtusepi32_epi16(a: __m256i) -> __m128i { transmute(vpmovusdw256( @@ -12814,6 +13685,7 @@ pub unsafe fn _mm256_cvtusepi32_epi16(a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi32_epi16&expand=2052) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdw))] pub unsafe fn _mm256_mask_cvtusepi32_epi16(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovusdw256(a.as_u32x8(), src.as_u16x8(), k)) @@ -12824,6 +13696,7 @@ pub unsafe fn _mm256_mask_cvtusepi32_epi16(src: __m128i, k: __mmask8, a: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtusepi32_epi16&expand=2053) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdw))] pub unsafe fn _mm256_maskz_cvtusepi32_epi16(k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovusdw256( @@ -12838,6 +13711,7 @@ pub unsafe fn _mm256_maskz_cvtusepi32_epi16(k: __mmask8, a: __m256i) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtusepi32_epi16&expand=2048) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdw))] pub unsafe fn _mm_cvtusepi32_epi16(a: __m128i) -> __m128i { transmute(vpmovusdw128( @@ -12852,6 +13726,7 @@ pub unsafe fn 
_mm_cvtusepi32_epi16(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi32_epi16&expand=2049) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdw))] pub unsafe fn _mm_mask_cvtusepi32_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovusdw128(a.as_u32x4(), src.as_u16x8(), k)) @@ -12862,6 +13737,7 @@ pub unsafe fn _mm_mask_cvtusepi32_epi16(src: __m128i, k: __mmask8, a: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtusepi32_epi16&expand=2050) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdw))] pub unsafe fn _mm_maskz_cvtusepi32_epi16(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovusdw128( @@ -12876,6 +13752,7 @@ pub unsafe fn _mm_maskz_cvtusepi32_epi16(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtusepi32_epi8&expand=2063) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdb))] pub unsafe fn _mm512_cvtusepi32_epi8(a: __m512i) -> __m128i { transmute(vpmovusdb( @@ -12890,6 +13767,7 @@ pub unsafe fn _mm512_cvtusepi32_epi8(a: __m512i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi32_epi8&expand=2064) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdb))] pub unsafe fn _mm512_mask_cvtusepi32_epi8(src: __m128i, k: __mmask16, a: __m512i) -> __m128i { 
transmute(vpmovusdb(a.as_u32x16(), src.as_u8x16(), k)) @@ -12900,6 +13778,7 @@ pub unsafe fn _mm512_mask_cvtusepi32_epi8(src: __m128i, k: __mmask16, a: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtusepi32_epi8&expand=2065) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdb))] pub unsafe fn _mm512_maskz_cvtusepi32_epi8(k: __mmask16, a: __m512i) -> __m128i { transmute(vpmovusdb(a.as_u32x16(), _mm_setzero_si128().as_u8x16(), k)) @@ -12910,6 +13789,7 @@ pub unsafe fn _mm512_maskz_cvtusepi32_epi8(k: __mmask16, a: __m512i) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtusepi32_epi8&expand=2060) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdb))] pub unsafe fn _mm256_cvtusepi32_epi8(a: __m256i) -> __m128i { transmute(vpmovusdb256( @@ -12924,6 +13804,7 @@ pub unsafe fn _mm256_cvtusepi32_epi8(a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi32_epi8&expand=2061) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdb))] pub unsafe fn _mm256_mask_cvtusepi32_epi8(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovusdb256(a.as_u32x8(), src.as_u8x16(), k)) @@ -12934,6 +13815,7 @@ pub unsafe fn _mm256_mask_cvtusepi32_epi8(src: __m128i, k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtusepi32_epi8&expand=2062) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdb))] pub unsafe fn _mm256_maskz_cvtusepi32_epi8(k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovusdb256( @@ -12948,6 +13830,7 @@ pub unsafe fn _mm256_maskz_cvtusepi32_epi8(k: __mmask8, a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtusepi32_epi8&expand=2057) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdb))] pub unsafe fn _mm_cvtusepi32_epi8(a: __m128i) -> __m128i { transmute(vpmovusdb128( @@ -12962,6 +13845,7 @@ pub unsafe fn _mm_cvtusepi32_epi8(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi32_epi8&expand=2058) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdb))] pub unsafe fn _mm_mask_cvtusepi32_epi8(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovusdb128(a.as_u32x4(), src.as_u8x16(), k)) @@ -12972,6 +13856,7 @@ pub unsafe fn _mm_mask_cvtusepi32_epi8(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtusepi32_epi8&expand=2059) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdb))] pub unsafe fn _mm_maskz_cvtusepi32_epi8(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovusdb128( @@ -12986,6 +13871,7 @@ pub unsafe fn _mm_maskz_cvtusepi32_epi8(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtusepi64_epi32&expand=2087) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqd))] pub unsafe fn _mm512_cvtusepi64_epi32(a: __m512i) -> __m256i { transmute(vpmovusqd( @@ -13000,6 +13886,7 @@ pub unsafe fn _mm512_cvtusepi64_epi32(a: __m512i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi64_epi32&expand=2088) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqd))] pub unsafe fn _mm512_mask_cvtusepi64_epi32(src: __m256i, k: __mmask8, a: __m512i) -> __m256i { transmute(vpmovusqd(a.as_u64x8(), src.as_u32x8(), k)) @@ -13010,6 +13897,7 @@ pub unsafe fn _mm512_mask_cvtusepi64_epi32(src: __m256i, k: __mmask8, a: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtusepi64_epi32&expand=2089) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqd))] pub unsafe fn _mm512_maskz_cvtusepi64_epi32(k: __mmask8, a: __m512i) -> __m256i { transmute(vpmovusqd( @@ -13024,6 +13912,7 @@ pub unsafe fn _mm512_maskz_cvtusepi64_epi32(k: __mmask8, a: __m512i) -> __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtusepi64_epi32&expand=2084) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqd))] pub unsafe fn _mm256_cvtusepi64_epi32(a: __m256i) -> __m128i { transmute(vpmovusqd256( @@ -13038,6 +13927,7 @@ pub unsafe fn _mm256_cvtusepi64_epi32(a: __m256i) -> __m128i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi64_epi32&expand=2085) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqd))] pub unsafe fn _mm256_mask_cvtusepi64_epi32(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovusqd256(a.as_u64x4(), src.as_u32x4(), k)) @@ -13048,6 +13938,7 @@ pub unsafe fn _mm256_mask_cvtusepi64_epi32(src: __m128i, k: __mmask8, a: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtusepi64_epi32&expand=2086) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqd))] pub unsafe fn _mm256_maskz_cvtusepi64_epi32(k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovusqd256( @@ -13062,6 +13953,7 @@ pub unsafe fn _mm256_maskz_cvtusepi64_epi32(k: __mmask8, a: __m256i) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtusepi64_epi32&expand=2081) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqd))] pub unsafe fn _mm_cvtusepi64_epi32(a: __m128i) -> __m128i { transmute(vpmovusqd128( @@ -13076,6 +13968,7 @@ pub unsafe fn _mm_cvtusepi64_epi32(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi64_epi32&expand=2082) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqd))] pub unsafe fn _mm_mask_cvtusepi64_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovusqd128(a.as_u64x2(), 
src.as_u32x4(), k)) @@ -13086,6 +13979,7 @@ pub unsafe fn _mm_mask_cvtusepi64_epi32(src: __m128i, k: __mmask8, a: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtusepi64_epi32&expand=2083) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqd))] pub unsafe fn _mm_maskz_cvtusepi64_epi32(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovusqd128( @@ -13100,6 +13994,7 @@ pub unsafe fn _mm_maskz_cvtusepi64_epi32(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtusepi64_epi16&expand=2078) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqw))] pub unsafe fn _mm512_cvtusepi64_epi16(a: __m512i) -> __m128i { transmute(vpmovusqw( @@ -13114,6 +14009,7 @@ pub unsafe fn _mm512_cvtusepi64_epi16(a: __m512i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi64_epi16&expand=2079) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqw))] pub unsafe fn _mm512_mask_cvtusepi64_epi16(src: __m128i, k: __mmask8, a: __m512i) -> __m128i { transmute(vpmovusqw(a.as_u64x8(), src.as_u16x8(), k)) @@ -13124,6 +14020,7 @@ pub unsafe fn _mm512_mask_cvtusepi64_epi16(src: __m128i, k: __mmask8, a: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtusepi64_epi16&expand=2080) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqw))] pub unsafe fn 
_mm512_maskz_cvtusepi64_epi16(k: __mmask8, a: __m512i) -> __m128i { transmute(vpmovusqw(a.as_u64x8(), _mm_setzero_si128().as_u16x8(), k)) @@ -13134,6 +14031,7 @@ pub unsafe fn _mm512_maskz_cvtusepi64_epi16(k: __mmask8, a: __m512i) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtusepi64_epi16&expand=2075) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqw))] pub unsafe fn _mm256_cvtusepi64_epi16(a: __m256i) -> __m128i { transmute(vpmovusqw256( @@ -13148,6 +14046,7 @@ pub unsafe fn _mm256_cvtusepi64_epi16(a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi64_epi16&expand=2076) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqw))] pub unsafe fn _mm256_mask_cvtusepi64_epi16(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovusqw256(a.as_u64x4(), src.as_u16x8(), k)) @@ -13158,6 +14057,7 @@ pub unsafe fn _mm256_mask_cvtusepi64_epi16(src: __m128i, k: __mmask8, a: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtusepi64_epi16&expand=2077) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqw))] pub unsafe fn _mm256_maskz_cvtusepi64_epi16(k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovusqw256( @@ -13172,6 +14072,7 @@ pub unsafe fn _mm256_maskz_cvtusepi64_epi16(k: __mmask8, a: __m256i) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtusepi64_epi16&expand=2072) #[inline] #[target_feature(enable = 
"avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqw))] pub unsafe fn _mm_cvtusepi64_epi16(a: __m128i) -> __m128i { transmute(vpmovusqw128( @@ -13186,6 +14087,7 @@ pub unsafe fn _mm_cvtusepi64_epi16(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi64_epi16&expand=2073) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqw))] pub unsafe fn _mm_mask_cvtusepi64_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovusqw128(a.as_u64x2(), src.as_u16x8(), k)) @@ -13196,6 +14098,7 @@ pub unsafe fn _mm_mask_cvtusepi64_epi16(src: __m128i, k: __mmask8, a: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtusepi64_epi16&expand=2074) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqw))] pub unsafe fn _mm_maskz_cvtusepi64_epi16(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovusqw128( @@ -13210,6 +14113,7 @@ pub unsafe fn _mm_maskz_cvtusepi64_epi16(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtusepi64_epi8&expand=2096) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqb))] pub unsafe fn _mm512_cvtusepi64_epi8(a: __m512i) -> __m128i { transmute(vpmovusqb( @@ -13224,6 +14128,7 @@ pub unsafe fn _mm512_cvtusepi64_epi8(a: __m512i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi64_epi8&expand=2097) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqb))] pub unsafe fn _mm512_mask_cvtusepi64_epi8(src: __m128i, k: __mmask8, a: __m512i) -> __m128i { transmute(vpmovusqb(a.as_u64x8(), src.as_u8x16(), k)) @@ -13234,6 +14139,7 @@ pub unsafe fn _mm512_mask_cvtusepi64_epi8(src: __m128i, k: __mmask8, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtusepi64_epi8&expand=2098) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqb))] pub unsafe fn _mm512_maskz_cvtusepi64_epi8(k: __mmask8, a: __m512i) -> __m128i { transmute(vpmovusqb(a.as_u64x8(), _mm_setzero_si128().as_u8x16(), k)) @@ -13244,6 +14150,7 @@ pub unsafe fn _mm512_maskz_cvtusepi64_epi8(k: __mmask8, a: __m512i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtusepi64_epi8&expand=2093) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqb))] pub unsafe fn _mm256_cvtusepi64_epi8(a: __m256i) -> __m128i { transmute(vpmovusqb256( @@ -13258,6 +14165,7 @@ pub unsafe fn _mm256_cvtusepi64_epi8(a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi64_epi8&expand=2094) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqb))] pub unsafe fn _mm256_mask_cvtusepi64_epi8(src: __m128i, k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovusqb256(a.as_u64x4(), src.as_u8x16(), k)) @@ -13268,6 +14176,7 @@ pub unsafe fn _mm256_mask_cvtusepi64_epi8(src: __m128i, k: __mmask8, a: __m256i) 
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtusepi64_epi8&expand=2095) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqb))] pub unsafe fn _mm256_maskz_cvtusepi64_epi8(k: __mmask8, a: __m256i) -> __m128i { transmute(vpmovusqb256( @@ -13282,6 +14191,7 @@ pub unsafe fn _mm256_maskz_cvtusepi64_epi8(k: __mmask8, a: __m256i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtusepi64_epi8&expand=2090) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqb))] pub unsafe fn _mm_cvtusepi64_epi8(a: __m128i) -> __m128i { transmute(vpmovusqb128( @@ -13296,6 +14206,7 @@ pub unsafe fn _mm_cvtusepi64_epi8(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi64_epi8&expand=2091) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqb))] pub unsafe fn _mm_mask_cvtusepi64_epi8(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovusqb128(a.as_u64x2(), src.as_u8x16(), k)) @@ -13306,6 +14217,7 @@ pub unsafe fn _mm_mask_cvtusepi64_epi8(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtusepi64_epi8&expand=2092) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqb))] pub unsafe fn _mm_maskz_cvtusepi64_epi8(k: __mmask8, a: __m128i) -> __m128i { transmute(vpmovusqb128( @@ -13327,6 +14239,7 @@ pub unsafe fn 
_mm_maskz_cvtusepi64_epi8(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundps_epi32&expand=1335) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2dq, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_cvt_roundps_epi32(a: __m512) -> __m512i { @@ -13349,6 +14262,7 @@ pub unsafe fn _mm512_cvt_roundps_epi32(a: __m512) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundps_epi32&expand=1336) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2dq, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_cvt_roundps_epi32( @@ -13375,6 +14289,7 @@ pub unsafe fn _mm512_mask_cvt_roundps_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundps_epi32&expand=1337) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2dq, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_cvt_roundps_epi32( @@ -13400,6 +14315,7 @@ pub unsafe fn _mm512_maskz_cvt_roundps_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundps_epu32&expand=1341) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2udq, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_cvt_roundps_epu32(a: __m512) -> __m512i { @@ -13422,6 +14338,7 @@ pub unsafe fn _mm512_cvt_roundps_epu32(a: __m512) -> __m512 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundps_epu32&expand=1342) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2udq, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_cvt_roundps_epu32( @@ -13448,6 +14365,7 @@ pub unsafe fn _mm512_mask_cvt_roundps_epu32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundps_epu32&expand=1343) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2udq, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_cvt_roundps_epu32( @@ -13467,6 +14385,7 @@ pub unsafe fn _mm512_maskz_cvt_roundps_epu32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundps_pd&expand=1347) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2pd, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_cvt_roundps_pd(a: __m256) -> __m512d { @@ -13483,6 +14402,7 @@ pub unsafe fn _mm512_cvt_roundps_pd(a: __m256) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundps_epi32&expand=1336) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2pd, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_cvt_roundps_pd( @@ -13503,6 +14423,7 @@ pub unsafe fn _mm512_mask_cvt_roundps_pd( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundps_epi32&expand=1337) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2pd, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_cvt_roundps_pd(k: __mmask8, a: __m256) -> __m512d { @@ -13525,6 +14446,7 @@ pub unsafe fn _mm512_maskz_cvt_roundps_pd(k: __mmask8, a: __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundpd_epi32&expand=1315) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2dq, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_cvt_roundpd_epi32(a: __m512d) -> __m256i { @@ -13547,6 +14469,7 @@ pub unsafe fn _mm512_cvt_roundpd_epi32(a: __m512d) -> __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundpd_epi32&expand=1316) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2dq, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_cvt_roundpd_epi32( @@ -13573,6 +14496,7 @@ pub unsafe fn _mm512_mask_cvt_roundpd_epi32( /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundpd_epi32&expand=1317) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2dq, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_cvt_roundpd_epi32( @@ -13598,6 +14522,7 @@ pub unsafe fn _mm512_maskz_cvt_roundpd_epi32( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundpd_epu32&expand=1321) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2udq, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_cvt_roundpd_epu32(a: __m512d) -> __m256i { @@ -13620,6 +14545,7 @@ pub unsafe fn _mm512_cvt_roundpd_epu32(a: __m512d) -> __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundpd_epu32&expand=1322) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2udq, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_cvt_roundpd_epu32( @@ -13646,6 +14572,7 @@ pub unsafe fn _mm512_mask_cvt_roundpd_epu32( /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundpd_epu32&expand=1323) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2udq, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_cvt_roundpd_epu32( @@ -13671,6 +14598,7 @@ pub unsafe fn _mm512_maskz_cvt_roundpd_epu32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundpd_ps&expand=1327) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2ps, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_cvt_roundpd_ps(a: __m512d) -> __m256 { @@ -13693,6 +14621,7 @@ pub unsafe fn _mm512_cvt_roundpd_ps(a: __m512d) -> __m256 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundpd_ps&expand=1328) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2ps, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_cvt_roundpd_ps( @@ -13719,6 +14648,7 @@ pub unsafe fn _mm512_mask_cvt_roundpd_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundpd_ps&expand=1329) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtpd2ps, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_cvt_roundpd_ps(k: __mmask8, a: __m512d) -> __m256 { @@ -13741,6 +14671,7 @@ pub unsafe fn _mm512_maskz_cvt_roundpd_ps(k: __mmask8, a: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundepi32_ps&expand=1294) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2ps, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_cvt_roundepi32_ps(a: __m512i) -> __m512 { @@ -13762,6 +14693,7 @@ pub unsafe fn _mm512_cvt_roundepi32_ps(a: __m512i) -> __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundepi32_ps&expand=1295) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2ps, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_cvt_roundepi32_ps( @@ -13787,6 +14719,7 @@ pub unsafe fn _mm512_mask_cvt_roundepi32_ps( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundepi32_ps&expand=1296) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtdq2ps, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_cvt_roundepi32_ps( @@ -13812,6 +14745,7 @@ pub unsafe fn _mm512_maskz_cvt_roundepi32_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundepu32_ps&expand=1303) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2ps, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_cvt_roundepu32_ps(a: __m512i) -> __m512 { @@ -13833,6 +14767,7 @@ pub unsafe fn _mm512_cvt_roundepu32_ps(a: __m512i) -> __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundepu32_ps&expand=1304) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2ps, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_cvt_roundepu32_ps( @@ -13858,6 +14793,7 @@ pub unsafe fn _mm512_mask_cvt_roundepu32_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundepu32_ps&expand=1305) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtudq2ps, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_cvt_roundepu32_ps( @@ -13877,6 +14813,7 @@ pub unsafe fn _mm512_maskz_cvt_roundepu32_ps( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundps_ph&expand=1354) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2ph, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_cvt_roundps_ph(a: __m512) -> __m256i { @@ -13893,6 +14830,7 @@ pub unsafe fn _mm512_cvt_roundps_ph(a: __m512) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundps_ph&expand=1355) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2ph, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_cvt_roundps_ph( @@ -13913,6 +14851,7 @@ pub unsafe fn _mm512_mask_cvt_roundps_ph( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundps_ph&expand=1356) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2ph, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_cvt_roundps_ph(k: __mmask16, a: __m512) -> __m256i { @@ -13934,6 +14873,7 @@ pub unsafe fn _mm512_maskz_cvt_roundps_ph(k: __mmask16, a: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvt_roundps_ph&expand=1352) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_cvt_roundps_ph( @@ -13959,6 +14899,7 @@ pub unsafe fn _mm256_mask_cvt_roundps_ph( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvt_roundps_ph&expand=1353) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_cvt_roundps_ph(k: __mmask8, a: __m256) -> __m128i { @@ -13980,6 +14921,7 @@ pub unsafe fn _mm256_maskz_cvt_roundps_ph(k: __mmask8, a: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvt_roundps_ph&expand=1350) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_cvt_roundps_ph( @@ -14005,6 +14947,7 @@ pub unsafe fn _mm_mask_cvt_roundps_ph( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvt_roundps_ph&expand=1351) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_cvt_roundps_ph(k: __mmask8, a: __m128) -> __m128i { @@ -14021,6 +14964,7 @@ pub unsafe fn _mm_maskz_cvt_roundps_ph(k: __mmask8, a: __m128) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtps_ph&expand=1778) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2ph, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_cvtps_ph(a: __m512) -> __m256i { @@ -14037,6 +14981,7 @@ pub unsafe fn _mm512_cvtps_ph(a: __m512) -> __m256i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtps_ph&expand=1779) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2ph, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_cvtps_ph( @@ -14057,6 +15002,7 @@ pub unsafe fn _mm512_mask_cvtps_ph( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtps_ph&expand=1780) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2ph, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_cvtps_ph(k: __mmask16, a: __m512) -> __m256i { @@ -14078,6 +15024,7 @@ pub unsafe fn _mm512_maskz_cvtps_ph(k: __mmask16, a: __m512) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtps_ph&expand=1776) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_cvtps_ph( @@ -14103,6 +15050,7 @@ pub unsafe fn _mm256_mask_cvtps_ph( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtps_ph&expand=1777) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_cvtps_ph(k: __mmask8, a: __m256) -> __m128i { @@ -14124,6 +15072,7 @@ pub unsafe fn _mm256_maskz_cvtps_ph(k: __mmask8, a: __m256) -> /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtps_ph&expand=1773) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_cvtps_ph(src: __m128i, k: __mmask8, a: __m128) -> __m128i { @@ -14145,6 +15094,7 @@ pub unsafe fn _mm_mask_cvtps_ph(src: __m128i, k: __mmask8, a: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtps_ph&expand=1774) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_cvtps_ph(k: __mmask8, a: __m128) -> __m128i { @@ -14161,6 +15111,7 @@ pub unsafe fn _mm_maskz_cvtps_ph(k: __mmask8, a: __m128) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundph_ps&expand=1332) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtph2ps, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_cvt_roundph_ps(a: __m256i) -> __m512 { @@ -14177,6 +15128,7 @@ pub unsafe fn _mm512_cvt_roundph_ps(a: __m256i) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundph_ps&expand=1333) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtph2ps, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_cvt_roundph_ps( @@ -14197,6 +15149,7 @@ pub unsafe fn _mm512_mask_cvt_roundph_ps( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundph_ps&expand=1334) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtph2ps, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_cvt_roundph_ps(k: __mmask16, a: __m256i) -> __m512 { @@ -14212,6 +15165,7 @@ pub unsafe fn _mm512_maskz_cvt_roundph_ps(k: __mmask16, a: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtph_ps&expand=1723) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtph2ps))] pub unsafe fn _mm512_cvtph_ps(a: __m256i) -> __m512 { transmute(vcvtph2ps( @@ -14227,6 +15181,7 @@ pub unsafe fn _mm512_cvtph_ps(a: __m256i) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtph_ps&expand=1724) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtph2ps))] pub unsafe fn _mm512_mask_cvtph_ps(src: __m512, k: __mmask16, a: __m256i) -> __m512 { transmute(vcvtph2ps( @@ -14242,6 +15197,7 @@ pub unsafe fn _mm512_mask_cvtph_ps(src: __m512, k: __mmask16, a: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtph_ps&expand=1725) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtph2ps))] pub unsafe fn _mm512_maskz_cvtph_ps(k: __mmask16, a: __m256i) -> __m512 { transmute(vcvtph2ps( @@ -14257,6 +15213,7 @@ pub unsafe fn _mm512_maskz_cvtph_ps(k: __mmask16, a: __m256i) -> __m512 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtph_ps&expand=1721) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtph2ps))] pub unsafe fn _mm256_mask_cvtph_ps(src: __m256, k: __mmask8, a: __m128i) -> __m256 { let convert = _mm256_cvtph_ps(a); @@ -14268,6 +15225,7 @@ pub unsafe fn _mm256_mask_cvtph_ps(src: __m256, k: __mmask8, a: __m128i) -> __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtph_ps&expand=1722) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtph2ps))] pub unsafe fn _mm256_maskz_cvtph_ps(k: __mmask8, a: __m128i) -> __m256 { let convert = _mm256_cvtph_ps(a); @@ -14280,6 +15238,7 @@ pub unsafe fn _mm256_maskz_cvtph_ps(k: __mmask8, a: __m128i) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtph_ps&expand=1718) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtph2ps))] pub unsafe fn _mm_mask_cvtph_ps(src: __m128, k: __mmask8, a: __m128i) -> __m128 { let convert = _mm_cvtph_ps(a); @@ -14291,6 +15250,7 @@ pub unsafe fn _mm_mask_cvtph_ps(src: __m128, k: __mmask8, a: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtph_ps&expand=1719) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtph2ps))] pub unsafe fn _mm_maskz_cvtph_ps(k: __mmask8, a: __m128i) -> __m128 { let convert = _mm_cvtph_ps(a); @@ -14304,6 +15264,7 @@ pub unsafe fn 
_mm_maskz_cvtph_ps(k: __mmask8, a: __m128i) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtt_roundps_epi32&expand=1916) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2dq, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_cvtt_roundps_epi32(a: __m512) -> __m512i { @@ -14320,6 +15281,7 @@ pub unsafe fn _mm512_cvtt_roundps_epi32(a: __m512) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtt_roundps_epi32&expand=1917) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2dq, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_cvtt_roundps_epi32( @@ -14340,6 +15302,7 @@ pub unsafe fn _mm512_mask_cvtt_roundps_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtt_roundps_epi32&expand=1918) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2dq, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_cvtt_roundps_epi32(k: __mmask16, a: __m512) -> __m512i { @@ -14356,6 +15319,7 @@ pub unsafe fn _mm512_maskz_cvtt_roundps_epi32(k: __mmask16, a: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtt_roundps_epu32&expand=1922) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2udq, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_cvtt_roundps_epu32(a: __m512) -> __m512i { @@ -14372,6 +15336,7 @@ pub unsafe fn 
_mm512_cvtt_roundps_epu32(a: __m512) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtt_roundps_epu32&expand=1923) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2udq, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_cvtt_roundps_epu32( @@ -14392,6 +15357,7 @@ pub unsafe fn _mm512_mask_cvtt_roundps_epu32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtt_roundps_epu32&expand=1924) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2udq, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_cvtt_roundps_epu32(k: __mmask16, a: __m512) -> __m512i { @@ -14408,6 +15374,7 @@ pub unsafe fn _mm512_maskz_cvtt_roundps_epu32(k: __mmask16, a: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtt_roundpd_epi32&expand=1904) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2dq, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_cvtt_roundpd_epi32(a: __m512d) -> __m256i { @@ -14424,6 +15391,7 @@ pub unsafe fn _mm512_cvtt_roundpd_epi32(a: __m512d) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtt_roundpd_epi32&expand=1905) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2dq, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_cvtt_roundpd_epi32( @@ -14444,6 +15412,7 @@ pub unsafe fn _mm512_mask_cvtt_roundpd_epi32( 
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtt_roundps_epi32&expand=1918) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2dq, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_cvtt_roundpd_epi32(k: __mmask8, a: __m512d) -> __m256i { @@ -14460,6 +15429,7 @@ pub unsafe fn _mm512_maskz_cvtt_roundpd_epi32(k: __mmask8, a: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtt_roundpd_epu32&expand=1910) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2udq, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_cvtt_roundpd_epu32(a: __m512d) -> __m256i { @@ -14476,6 +15446,7 @@ pub unsafe fn _mm512_cvtt_roundpd_epu32(a: __m512d) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtt_roundpd_epu32&expand=1911) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2udq, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_cvtt_roundpd_epu32( @@ -14495,6 +15466,7 @@ pub unsafe fn _mm512_mask_cvtt_roundpd_epu32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvttps_epi32&expand=1984) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2dq))] pub unsafe fn _mm512_cvttps_epi32(a: __m512) -> __m512i { transmute(vcvttps2dq( @@ -14510,6 +15482,7 @@ pub unsafe fn _mm512_cvttps_epi32(a: __m512) -> __m512i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvttps_epi32&expand=1985) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2dq))] pub unsafe fn _mm512_mask_cvttps_epi32(src: __m512i, k: __mmask16, a: __m512) -> __m512i { transmute(vcvttps2dq( @@ -14525,6 +15498,7 @@ pub unsafe fn _mm512_mask_cvttps_epi32(src: __m512i, k: __mmask16, a: __m512) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvttps_epi32&expand=1986) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2dq))] pub unsafe fn _mm512_maskz_cvttps_epi32(k: __mmask16, a: __m512) -> __m512i { transmute(vcvttps2dq( @@ -14540,6 +15514,7 @@ pub unsafe fn _mm512_maskz_cvttps_epi32(k: __mmask16, a: __m512) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvttps_epi32&expand=1982) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2dq))] pub unsafe fn _mm256_mask_cvttps_epi32(src: __m256i, k: __mmask8, a: __m256) -> __m256i { transmute(vcvttps2dq256(a.as_f32x8(), src.as_i32x8(), k)) @@ -14550,6 +15525,7 @@ pub unsafe fn _mm256_mask_cvttps_epi32(src: __m256i, k: __mmask8, a: __m256) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvttps_epi32&expand=1983) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2dq))] pub unsafe fn _mm256_maskz_cvttps_epi32(k: __mmask8, a: __m256) -> __m256i { transmute(vcvttps2dq256( @@ -14564,6 +15540,7 
@@ pub unsafe fn _mm256_maskz_cvttps_epi32(k: __mmask8, a: __m256) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvttps_epi32&expand=1979) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2dq))] pub unsafe fn _mm_mask_cvttps_epi32(src: __m128i, k: __mmask8, a: __m128) -> __m128i { transmute(vcvttps2dq128(a.as_f32x4(), src.as_i32x4(), k)) @@ -14574,6 +15551,7 @@ pub unsafe fn _mm_mask_cvttps_epi32(src: __m128i, k: __mmask8, a: __m128) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvttps_epi32&expand=1980) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2dq))] pub unsafe fn _mm_maskz_cvttps_epi32(k: __mmask8, a: __m128) -> __m128i { transmute(vcvttps2dq128( @@ -14588,6 +15566,7 @@ pub unsafe fn _mm_maskz_cvttps_epi32(k: __mmask8, a: __m128) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvttps_epu32&expand=2002) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2udq))] pub unsafe fn _mm512_cvttps_epu32(a: __m512) -> __m512i { transmute(vcvttps2udq( @@ -14603,6 +15582,7 @@ pub unsafe fn _mm512_cvttps_epu32(a: __m512) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvttps_epu32&expand=2003) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2udq))] pub unsafe fn _mm512_mask_cvttps_epu32(src: __m512i, k: __mmask16, a: __m512) -> __m512i { 
transmute(vcvttps2udq( @@ -14618,6 +15598,7 @@ pub unsafe fn _mm512_mask_cvttps_epu32(src: __m512i, k: __mmask16, a: __m512) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvttps_epu32&expand=2004) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2udq))] pub unsafe fn _mm512_maskz_cvttps_epu32(k: __mmask16, a: __m512) -> __m512i { transmute(vcvttps2udq( @@ -14633,6 +15614,7 @@ pub unsafe fn _mm512_maskz_cvttps_epu32(k: __mmask16, a: __m512) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvttps_epu32&expand=1999) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2udq))] pub unsafe fn _mm256_cvttps_epu32(a: __m256) -> __m256i { transmute(vcvttps2udq256( @@ -14647,6 +15629,7 @@ pub unsafe fn _mm256_cvttps_epu32(a: __m256) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvttps_epu32&expand=2000) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2udq))] pub unsafe fn _mm256_mask_cvttps_epu32(src: __m256i, k: __mmask8, a: __m256) -> __m256i { transmute(vcvttps2udq256(a.as_f32x8(), src.as_u32x8(), k)) @@ -14657,6 +15640,7 @@ pub unsafe fn _mm256_mask_cvttps_epu32(src: __m256i, k: __mmask8, a: __m256) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvttps_epu32&expand=2001) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2udq))] pub unsafe fn 
_mm256_maskz_cvttps_epu32(k: __mmask8, a: __m256) -> __m256i { transmute(vcvttps2udq256( @@ -14671,6 +15655,7 @@ pub unsafe fn _mm256_maskz_cvttps_epu32(k: __mmask8, a: __m256) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttps_epu32&expand=1996) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2udq))] pub unsafe fn _mm_cvttps_epu32(a: __m128) -> __m128i { transmute(vcvttps2udq128( @@ -14685,6 +15670,7 @@ pub unsafe fn _mm_cvttps_epu32(a: __m128) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvttps_epu32&expand=1997) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2udq))] pub unsafe fn _mm_mask_cvttps_epu32(src: __m128i, k: __mmask8, a: __m128) -> __m128i { transmute(vcvttps2udq128(a.as_f32x4(), src.as_u32x4(), k)) @@ -14695,6 +15681,7 @@ pub unsafe fn _mm_mask_cvttps_epu32(src: __m128i, k: __mmask8, a: __m128) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvttps_epu32&expand=1998) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttps2udq))] pub unsafe fn _mm_maskz_cvttps_epu32(k: __mmask8, a: __m128) -> __m128i { transmute(vcvttps2udq128( @@ -14710,6 +15697,7 @@ pub unsafe fn _mm_maskz_cvttps_epu32(k: __mmask8, a: __m128) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtt_roundpd_epu32&expand=1912) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vcvttpd2udq, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_cvtt_roundpd_epu32(k: __mmask8, a: __m512d) -> __m256i { @@ -14725,6 +15713,7 @@ pub unsafe fn _mm512_maskz_cvtt_roundpd_epu32(k: __mmask8, a: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvttpd_epi32&expand=1947) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2dq))] pub unsafe fn _mm512_cvttpd_epi32(a: __m512d) -> __m256i { transmute(vcvttpd2dq( @@ -14740,6 +15729,7 @@ pub unsafe fn _mm512_cvttpd_epi32(a: __m512d) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvttpd_epi32&expand=1948) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2dq))] pub unsafe fn _mm512_mask_cvttpd_epi32(src: __m256i, k: __mmask8, a: __m512d) -> __m256i { transmute(vcvttpd2dq( @@ -14755,6 +15745,7 @@ pub unsafe fn _mm512_mask_cvttpd_epi32(src: __m256i, k: __mmask8, a: __m512d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvttpd_epi32&expand=1949) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2dq))] pub unsafe fn _mm512_maskz_cvttpd_epi32(k: __mmask8, a: __m512d) -> __m256i { transmute(vcvttpd2dq( @@ -14770,6 +15761,7 @@ pub unsafe fn _mm512_maskz_cvttpd_epi32(k: __mmask8, a: __m512d) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvttpd_epi32&expand=1945) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vcvttpd2dq))] pub unsafe fn _mm256_mask_cvttpd_epi32(src: __m128i, k: __mmask8, a: __m256d) -> __m128i { transmute(vcvttpd2dq256(a.as_f64x4(), src.as_i32x4(), k)) @@ -14780,6 +15772,7 @@ pub unsafe fn _mm256_mask_cvttpd_epi32(src: __m128i, k: __mmask8, a: __m256d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvttpd_epi32&expand=1946) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2dq))] pub unsafe fn _mm256_maskz_cvttpd_epi32(k: __mmask8, a: __m256d) -> __m128i { transmute(vcvttpd2dq256( @@ -14794,6 +15787,7 @@ pub unsafe fn _mm256_maskz_cvttpd_epi32(k: __mmask8, a: __m256d) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvttpd_epi32&expand=1942) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2dq))] pub unsafe fn _mm_mask_cvttpd_epi32(src: __m128i, k: __mmask8, a: __m128d) -> __m128i { transmute(vcvttpd2dq128(a.as_f64x2(), src.as_i32x4(), k)) @@ -14804,6 +15798,7 @@ pub unsafe fn _mm_mask_cvttpd_epi32(src: __m128i, k: __mmask8, a: __m128d) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvttpd_epi32&expand=1943) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2dq))] pub unsafe fn _mm_maskz_cvttpd_epi32(k: __mmask8, a: __m128d) -> __m128i { transmute(vcvttpd2dq128( @@ -14818,6 +15813,7 @@ pub unsafe fn _mm_maskz_cvttpd_epi32(k: __mmask8, a: __m128d) -> __m128i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvttpd_epu32&expand=1965) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2udq))] pub unsafe fn _mm512_cvttpd_epu32(a: __m512d) -> __m256i { transmute(vcvttpd2udq( @@ -14833,6 +15829,7 @@ pub unsafe fn _mm512_cvttpd_epu32(a: __m512d) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvttpd_epu32&expand=1966) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2udq))] pub unsafe fn _mm512_mask_cvttpd_epu32(src: __m256i, k: __mmask8, a: __m512d) -> __m256i { transmute(vcvttpd2udq( @@ -14848,6 +15845,7 @@ pub unsafe fn _mm512_mask_cvttpd_epu32(src: __m256i, k: __mmask8, a: __m512d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvttpd_epu32&expand=1967) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2udq))] pub unsafe fn _mm512_maskz_cvttpd_epu32(k: __mmask8, a: __m512d) -> __m256i { transmute(vcvttpd2udq( @@ -14863,6 +15861,7 @@ pub unsafe fn _mm512_maskz_cvttpd_epu32(k: __mmask8, a: __m512d) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvttpd_epu32&expand=1962) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2udq))] pub unsafe fn _mm256_cvttpd_epu32(a: __m256d) -> __m128i { transmute(vcvttpd2udq256( @@ -14877,6 +15876,7 @@ pub unsafe fn _mm256_cvttpd_epu32(a: __m256d) -> __m128i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvttpd_epu32&expand=1963) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2udq))] pub unsafe fn _mm256_mask_cvttpd_epu32(src: __m128i, k: __mmask8, a: __m256d) -> __m128i { transmute(vcvttpd2udq256(a.as_f64x4(), src.as_i32x4(), k)) @@ -14887,6 +15887,7 @@ pub unsafe fn _mm256_mask_cvttpd_epu32(src: __m128i, k: __mmask8, a: __m256d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvttpd_epu32&expand=1964) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2udq))] pub unsafe fn _mm256_maskz_cvttpd_epu32(k: __mmask8, a: __m256d) -> __m128i { transmute(vcvttpd2udq256( @@ -14901,6 +15902,7 @@ pub unsafe fn _mm256_maskz_cvttpd_epu32(k: __mmask8, a: __m256d) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttpd_epu32&expand=1959) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2udq))] pub unsafe fn _mm_cvttpd_epu32(a: __m128d) -> __m128i { transmute(vcvttpd2udq128( @@ -14915,6 +15917,7 @@ pub unsafe fn _mm_cvttpd_epu32(a: __m128d) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvttpd_epu32&expand=1960) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2udq))] pub unsafe fn _mm_mask_cvttpd_epu32(src: __m128i, k: __mmask8, a: __m128d) -> __m128i { transmute(vcvttpd2udq128(a.as_f64x2(), src.as_i32x4(), k)) @@ -14925,6 
+15928,7 @@ pub unsafe fn _mm_mask_cvttpd_epu32(src: __m128i, k: __mmask8, a: __m128d) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvttpd_epu32&expand=1961) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvttpd2udq))] pub unsafe fn _mm_maskz_cvttpd_epu32(k: __mmask8, a: __m128d) -> __m128i { transmute(vcvttpd2udq128( @@ -14939,6 +15943,7 @@ pub unsafe fn _mm_maskz_cvttpd_epu32(k: __mmask8, a: __m128d) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setzero_pd&expand=5018) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vxorps))] pub unsafe fn _mm512_setzero_pd() -> __m512d { // All-0 is a properly initialized __m512d @@ -14950,6 +15955,7 @@ pub unsafe fn _mm512_setzero_pd() -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setzero_ps&expand=5021) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vxorps))] pub unsafe fn _mm512_setzero_ps() -> __m512 { // All-0 is a properly initialized __m512 @@ -14961,6 +15967,7 @@ pub unsafe fn _mm512_setzero_ps() -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setzero&expand=5014) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vxorps))] pub unsafe fn _mm512_setzero() -> __m512 { // All-0 is a properly initialized __m512 @@ -14972,6 +15979,7 @@ pub unsafe fn _mm512_setzero() -> __m512 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setzero_si512&expand=5024) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vxorps))] pub unsafe fn _mm512_setzero_si512() -> __m512i { // All-0 is a properly initialized __m512i @@ -14983,6 +15991,7 @@ pub unsafe fn _mm512_setzero_si512() -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setzero_epi32&expand=5015) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vxorps))] pub unsafe fn _mm512_setzero_epi32() -> __m512i { // All-0 is a properly initialized __m512i @@ -14995,6 +16004,7 @@ pub unsafe fn _mm512_setzero_epi32() -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setr_epi32&expand=4991) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_setr_epi32( e15: i32, e14: i32, @@ -15024,6 +16034,7 @@ pub unsafe fn _mm512_setr_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set_epi8&expand=4915) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_set_epi8( e63: i8, e62: i8, @@ -15104,6 +16115,7 @@ pub unsafe fn _mm512_set_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set_epi16&expand=4905) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_set_epi16( e31: i16, e30: i16, @@ -15150,6 +16162,7 @@ pub unsafe fn _mm512_set_epi16( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set4_epi32&expand=4982) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_set4_epi32(d: i32, c: i32, b: i32, a: i32) -> __m512i { _mm512_set_epi32(d, c, b, a, d, c, b, a, d, c, b, a, d, c, b, a) } @@ -15159,6 +16172,7 @@ pub unsafe fn _mm512_set4_epi32(d: i32, c: i32, b: i32, a: i32) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set4_ps&expand=4985) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_set4_ps(d: f32, c: f32, b: f32, a: f32) -> __m512 { _mm512_set_ps(d, c, b, a, d, c, b, a, d, c, b, a, d, c, b, a) } @@ -15168,6 +16182,7 @@ pub unsafe fn _mm512_set4_ps(d: f32, c: f32, b: f32, a: f32) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set4_pd&expand=4984) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_set4_pd(d: f64, c: f64, b: f64, a: f64) -> __m512d { _mm512_set_pd(d, c, b, a, d, c, b, a) } @@ -15177,6 +16192,7 @@ pub unsafe fn _mm512_set4_pd(d: f64, c: f64, b: f64, a: f64) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setr4_epi32&expand=5009) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_setr4_epi32(d: i32, c: i32, b: i32, a: i32) -> __m512i { _mm512_set_epi32(a, b, c, d, a, b, c, d, a, b, c, d, a, b, c, d) } @@ -15186,6 +16202,7 @@ pub unsafe fn _mm512_setr4_epi32(d: i32, c: i32, b: i32, a: i32) -> __m512i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setr4_ps&expand=5012) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_setr4_ps(d: f32, c: f32, b: f32, a: f32) -> __m512 { _mm512_set_ps(a, b, c, d, a, b, c, d, a, b, c, d, a, b, c, d) } @@ -15195,6 +16212,7 @@ pub unsafe fn _mm512_setr4_ps(d: f32, c: f32, b: f32, a: f32) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setr4_pd&expand=5011) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_setr4_pd(d: f64, c: f64, b: f64, a: f64) -> __m512d { _mm512_set_pd(a, b, c, d, a, b, c, d) } @@ -15204,6 +16222,7 @@ pub unsafe fn _mm512_setr4_pd(d: f64, c: f64, b: f64, a: f64) -> __m512d { /// [Intel's documentation]( https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set_epi64&expand=4910) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_set_epi64( e0: i64, e1: i64, @@ -15222,6 +16241,7 @@ pub unsafe fn _mm512_set_epi64( /// [Intel's documentation]( https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setr_epi64&expand=4993) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_setr_epi64( e0: i64, e1: i64, @@ -15241,6 +16261,7 @@ pub unsafe fn _mm512_setr_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i32gather_pd&expand=3002) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgatherdpd, SCALE = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn 
_mm512_i32gather_pd(offsets: __m256i, slice: *const u8) -> __m512d { @@ -15258,6 +16279,7 @@ pub unsafe fn _mm512_i32gather_pd(offsets: __m256i, slice: *co /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i32gather_pd&expand=3003) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgatherdpd, SCALE = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_i32gather_pd( @@ -15279,6 +16301,7 @@ pub unsafe fn _mm512_mask_i32gather_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64gather_pd&expand=3092) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgatherqpd, SCALE = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_i64gather_pd(offsets: __m512i, slice: *const u8) -> __m512d { @@ -15296,6 +16319,7 @@ pub unsafe fn _mm512_i64gather_pd(offsets: __m512i, slice: *co /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i64gather_pd&expand=3093) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgatherqpd, SCALE = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_i64gather_pd( @@ -15317,6 +16341,7 @@ pub unsafe fn _mm512_mask_i64gather_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64gather_ps&expand=3100) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgatherqps, SCALE = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_i64gather_ps(offsets: __m512i, slice: *const u8) -> __m256 { @@ -15334,6 
+16359,7 @@ pub unsafe fn _mm512_i64gather_ps(offsets: __m512i, slice: *co /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i64gather_ps&expand=3101) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgatherqps, SCALE = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_i64gather_ps( @@ -15355,6 +16381,7 @@ pub unsafe fn _mm512_mask_i64gather_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i32gather_ps&expand=3010) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgatherdps, SCALE = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_i32gather_ps(offsets: __m512i, slice: *const u8) -> __m512 { @@ -15372,6 +16399,7 @@ pub unsafe fn _mm512_i32gather_ps(offsets: __m512i, slice: *co /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i32gather_ps&expand=3011) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgatherdps, SCALE = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_i32gather_ps( @@ -15393,6 +16421,7 @@ pub unsafe fn _mm512_mask_i32gather_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i32gather_epi32&expand=2986) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpgatherdd, SCALE = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_i32gather_epi32( @@ -15413,6 +16442,7 @@ pub unsafe fn _mm512_i32gather_epi32( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i32gather_epi32&expand=2987) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpgatherdd, SCALE = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_i32gather_epi32( @@ -15435,6 +16465,7 @@ pub unsafe fn _mm512_mask_i32gather_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i32gather_epi64&expand=2994) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpgatherdq, SCALE = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_i32gather_epi64( @@ -15455,6 +16486,7 @@ pub unsafe fn _mm512_i32gather_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i32gather_epi64&expand=2995) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpgatherdq, SCALE = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_i32gather_epi64( @@ -15477,6 +16509,7 @@ pub unsafe fn _mm512_mask_i32gather_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64gather_epi64&expand=3084) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpgatherqq, SCALE = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_i64gather_epi64( @@ -15497,6 +16530,7 @@ pub unsafe fn _mm512_i64gather_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i64gather_epi64&expand=3085) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature 
= "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpgatherqq, SCALE = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_i64gather_epi64( @@ -15519,6 +16553,7 @@ pub unsafe fn _mm512_mask_i64gather_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64gather_epi32&expand=3074) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpgatherqd, SCALE = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_i64gather_epi32( @@ -15539,6 +16574,7 @@ pub unsafe fn _mm512_i64gather_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i64gather_epi32&expand=3075) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpgatherqd, SCALE = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_i64gather_epi32( @@ -15561,6 +16597,7 @@ pub unsafe fn _mm512_mask_i64gather_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i32scatter_pd&expand=3044) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscatterdpd, SCALE = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_i32scatter_pd( @@ -15581,6 +16618,7 @@ pub unsafe fn _mm512_i32scatter_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i32scatter_pd&expand=3045) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscatterdpd, SCALE = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_i32scatter_pd( @@ -15601,6 +16639,7 @@ pub unsafe 
fn _mm512_mask_i32scatter_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64scatter_pd&expand=3122) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscatterqpd, SCALE = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_i64scatter_pd( @@ -15621,6 +16660,7 @@ pub unsafe fn _mm512_i64scatter_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i64scatter_pd&expand=3123) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscatterqpd, SCALE = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_i64scatter_pd( @@ -15641,6 +16681,7 @@ pub unsafe fn _mm512_mask_i64scatter_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i32scatter_ps&expand=3050) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscatterdps, SCALE = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_i32scatter_ps( @@ -15661,6 +16702,7 @@ pub unsafe fn _mm512_i32scatter_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i32scatter_ps&expand=3051) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscatterdps, SCALE = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_i32scatter_ps( @@ -15681,6 +16723,7 @@ pub unsafe fn _mm512_mask_i32scatter_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64scatter_ps&expand=3128) #[inline] #[target_feature(enable = "avx512f")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscatterqps, SCALE = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_i64scatter_ps( @@ -15701,6 +16744,7 @@ pub unsafe fn _mm512_i64scatter_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i64scatter_ps&expand=3129) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscatterqps, SCALE = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_i64scatter_ps( @@ -15721,6 +16765,7 @@ pub unsafe fn _mm512_mask_i64scatter_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i32scatter_epi64&expand=3038) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpscatterdq, SCALE = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_i32scatter_epi64( @@ -15741,6 +16786,7 @@ pub unsafe fn _mm512_i32scatter_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i32scatter_epi64&expand=3039) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpscatterdq, SCALE = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_i32scatter_epi64( @@ -15762,6 +16808,7 @@ pub unsafe fn _mm512_mask_i32scatter_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_i32scatter_epi64&expand=4099) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpscatterdq, SCALE = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_i32scatter_epi64( 
@@ -15782,6 +16829,7 @@ pub unsafe fn _mm256_i32scatter_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64scatter_epi64&expand=3116) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpscatterqq, SCALE = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_i64scatter_epi64( @@ -15802,6 +16850,7 @@ pub unsafe fn _mm512_i64scatter_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i64scatter_epi64&expand=3117) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpscatterqq, SCALE = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_i64scatter_epi64( @@ -15823,6 +16872,7 @@ pub unsafe fn _mm512_mask_i64scatter_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i32scatter_epi32&expand=3032) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpscatterdd, SCALE = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_i32scatter_epi32( @@ -15843,6 +16893,7 @@ pub unsafe fn _mm512_i32scatter_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i32scatter_epi32&expand=3033) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpscatterdd, SCALE = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_i32scatter_epi32( @@ -15864,6 +16915,7 @@ pub unsafe fn _mm512_mask_i32scatter_epi32( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64scatter_epi32&expand=3108) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpscatterqd, SCALE = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_i64scatter_epi32( @@ -15884,6 +16936,7 @@ pub unsafe fn _mm512_i64scatter_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i64scatter_epi32&expand=3109) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpscatterqd, SCALE = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_i64scatter_epi32( @@ -15905,6 +16958,7 @@ pub unsafe fn _mm512_mask_i64scatter_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compress_epi32&expand=1198) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressd))] pub unsafe fn _mm512_mask_compress_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { transmute(vpcompressd(a.as_i32x16(), src.as_i32x16(), k)) @@ -15915,6 +16969,7 @@ pub unsafe fn _mm512_mask_compress_epi32(src: __m512i, k: __mmask16, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_compress_epi32&expand=1199) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressd))] pub unsafe fn _mm512_maskz_compress_epi32(k: __mmask16, a: __m512i) -> __m512i { transmute(vpcompressd( @@ -15929,6 +16984,7 @@ pub unsafe fn _mm512_maskz_compress_epi32(k: __mmask16, a: __m512i) -> __m512i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compress_epi32&expand=1196) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressd))] pub unsafe fn _mm256_mask_compress_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { transmute(vpcompressd256(a.as_i32x8(), src.as_i32x8(), k)) @@ -15939,6 +16995,7 @@ pub unsafe fn _mm256_mask_compress_epi32(src: __m256i, k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_compress_epi32&expand=1197) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressd))] pub unsafe fn _mm256_maskz_compress_epi32(k: __mmask8, a: __m256i) -> __m256i { transmute(vpcompressd256( @@ -15953,6 +17010,7 @@ pub unsafe fn _mm256_maskz_compress_epi32(k: __mmask8, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compress_epi32&expand=1194) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressd))] pub unsafe fn _mm_mask_compress_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpcompressd128(a.as_i32x4(), src.as_i32x4(), k)) @@ -15963,6 +17021,7 @@ pub unsafe fn _mm_mask_compress_epi32(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_compress_epi32&expand=1195) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressd))] pub unsafe fn _mm_maskz_compress_epi32(k: __mmask8, 
a: __m128i) -> __m128i { transmute(vpcompressd128( @@ -15977,6 +17036,7 @@ pub unsafe fn _mm_maskz_compress_epi32(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compress_epi64&expand=1204) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressq))] pub unsafe fn _mm512_mask_compress_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { transmute(vpcompressq(a.as_i64x8(), src.as_i64x8(), k)) @@ -15987,6 +17047,7 @@ pub unsafe fn _mm512_mask_compress_epi64(src: __m512i, k: __mmask8, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_compress_epi64&expand=1205) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressq))] pub unsafe fn _mm512_maskz_compress_epi64(k: __mmask8, a: __m512i) -> __m512i { transmute(vpcompressq( @@ -16001,6 +17062,7 @@ pub unsafe fn _mm512_maskz_compress_epi64(k: __mmask8, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compress_epi64&expand=1202) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressq))] pub unsafe fn _mm256_mask_compress_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { transmute(vpcompressq256(a.as_i64x4(), src.as_i64x4(), k)) @@ -16011,6 +17073,7 @@ pub unsafe fn _mm256_mask_compress_epi64(src: __m256i, k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_compress_epi64&expand=1203) #[inline] #[target_feature(enable = "avx512f,avx512vl")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressq))] pub unsafe fn _mm256_maskz_compress_epi64(k: __mmask8, a: __m256i) -> __m256i { transmute(vpcompressq256( @@ -16025,6 +17088,7 @@ pub unsafe fn _mm256_maskz_compress_epi64(k: __mmask8, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compress_epi64&expand=1200) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressq))] pub unsafe fn _mm_mask_compress_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpcompressq128(a.as_i64x2(), src.as_i64x2(), k)) @@ -16035,6 +17099,7 @@ pub unsafe fn _mm_mask_compress_epi64(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_compress_epi64&expand=1201) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressq))] pub unsafe fn _mm_maskz_compress_epi64(k: __mmask8, a: __m128i) -> __m128i { transmute(vpcompressq128( @@ -16049,6 +17114,7 @@ pub unsafe fn _mm_maskz_compress_epi64(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compress_ps&expand=1222) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompressps))] pub unsafe fn _mm512_mask_compress_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { transmute(vcompressps(a.as_f32x16(), src.as_f32x16(), k)) @@ -16059,6 +17125,7 @@ pub unsafe fn _mm512_mask_compress_ps(src: __m512, k: __mmask16, a: __m512) -> _ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_compress_ps&expand=1223) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompressps))] pub unsafe fn _mm512_maskz_compress_ps(k: __mmask16, a: __m512) -> __m512 { transmute(vcompressps( @@ -16073,6 +17140,7 @@ pub unsafe fn _mm512_maskz_compress_ps(k: __mmask16, a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compress_ps&expand=1220) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompressps))] pub unsafe fn _mm256_mask_compress_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { transmute(vcompressps256(a.as_f32x8(), src.as_f32x8(), k)) @@ -16083,6 +17151,7 @@ pub unsafe fn _mm256_mask_compress_ps(src: __m256, k: __mmask8, a: __m256) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_compress_ps&expand=1221) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompressps))] pub unsafe fn _mm256_maskz_compress_ps(k: __mmask8, a: __m256) -> __m256 { transmute(vcompressps256( @@ -16097,6 +17166,7 @@ pub unsafe fn _mm256_maskz_compress_ps(k: __mmask8, a: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compress_ps&expand=1218) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompressps))] pub unsafe fn _mm_mask_compress_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { transmute(vcompressps128(a.as_f32x4(), src.as_f32x4(), 
k)) @@ -16107,6 +17177,7 @@ pub unsafe fn _mm_mask_compress_ps(src: __m128, k: __mmask8, a: __m128) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_compress_ps&expand=1219) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompressps))] pub unsafe fn _mm_maskz_compress_ps(k: __mmask8, a: __m128) -> __m128 { transmute(vcompressps128(a.as_f32x4(), _mm_setzero_ps().as_f32x4(), k)) @@ -16117,6 +17188,7 @@ pub unsafe fn _mm_maskz_compress_ps(k: __mmask8, a: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compress_pd&expand=1216) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompresspd))] pub unsafe fn _mm512_mask_compress_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { transmute(vcompresspd(a.as_f64x8(), src.as_f64x8(), k)) @@ -16127,6 +17199,7 @@ pub unsafe fn _mm512_mask_compress_pd(src: __m512d, k: __mmask8, a: __m512d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_compress_pd&expand=1217) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompresspd))] pub unsafe fn _mm512_maskz_compress_pd(k: __mmask8, a: __m512d) -> __m512d { transmute(vcompresspd(a.as_f64x8(), _mm512_setzero_pd().as_f64x8(), k)) @@ -16137,6 +17210,7 @@ pub unsafe fn _mm512_maskz_compress_pd(k: __mmask8, a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compress_pd&expand=1214) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vcompresspd))] pub unsafe fn _mm256_mask_compress_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { transmute(vcompresspd256(a.as_f64x4(), src.as_f64x4(), k)) @@ -16147,6 +17221,7 @@ pub unsafe fn _mm256_mask_compress_pd(src: __m256d, k: __mmask8, a: __m256d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_compress_pd&expand=1215) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompresspd))] pub unsafe fn _mm256_maskz_compress_pd(k: __mmask8, a: __m256d) -> __m256d { transmute(vcompresspd256( @@ -16161,6 +17236,7 @@ pub unsafe fn _mm256_maskz_compress_pd(k: __mmask8, a: __m256d) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compress_pd&expand=1212) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompresspd))] pub unsafe fn _mm_mask_compress_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d { transmute(vcompresspd128(a.as_f64x2(), src.as_f64x2(), k)) @@ -16171,6 +17247,7 @@ pub unsafe fn _mm_mask_compress_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_compress_pd&expand=1213) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompresspd))] pub unsafe fn _mm_maskz_compress_pd(k: __mmask8, a: __m128d) -> __m128d { transmute(vcompresspd128(a.as_f64x2(), _mm_setzero_pd().as_f64x2(), k)) @@ -16181,6 +17258,7 @@ pub unsafe fn _mm_maskz_compress_pd(k: __mmask8, a: __m128d) -> __m128d { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compressstoreu_epi32) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressd))] pub unsafe fn _mm512_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask16, a: __m512i) { vcompressstored(base_addr as *mut _, a.as_i32x16(), k) @@ -16191,6 +17269,7 @@ pub unsafe fn _mm512_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask16, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compressstoreu_epi32) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressd))] pub unsafe fn _mm256_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask8, a: __m256i) { vcompressstored256(base_addr as *mut _, a.as_i32x8(), k) @@ -16201,6 +17280,7 @@ pub unsafe fn _mm256_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compressstoreu_epi32) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressd))] pub unsafe fn _mm_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask8, a: __m128i) { vcompressstored128(base_addr as *mut _, a.as_i32x4(), k) @@ -16211,6 +17291,7 @@ pub unsafe fn _mm_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask8, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compressstoreu_epi64) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressq))] pub unsafe fn 
_mm512_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8, a: __m512i) { vcompressstoreq(base_addr as *mut _, a.as_i64x8(), k) @@ -16221,6 +17302,7 @@ pub unsafe fn _mm512_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compressstoreu_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressq))] pub unsafe fn _mm256_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8, a: __m256i) { vcompressstoreq256(base_addr as *mut _, a.as_i64x4(), k) @@ -16231,6 +17313,7 @@ pub unsafe fn _mm256_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compressstoreu_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressq))] pub unsafe fn _mm_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8, a: __m128i) { vcompressstoreq128(base_addr as *mut _, a.as_i64x2(), k) @@ -16241,6 +17324,7 @@ pub unsafe fn _mm_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compressstoreu_ps) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompressps))] pub unsafe fn _mm512_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask16, a: __m512) { vcompressstoreps(base_addr as *mut _, a.as_f32x16(), k) @@ -16251,6 +17335,7 @@ pub unsafe fn _mm512_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask16, a: /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compressstoreu_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompressps))] pub unsafe fn _mm256_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask8, a: __m256) { vcompressstoreps256(base_addr as *mut _, a.as_f32x8(), k) @@ -16261,6 +17346,7 @@ pub unsafe fn _mm256_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask8, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compressstoreu_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompressps))] pub unsafe fn _mm_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask8, a: __m128) { vcompressstoreps128(base_addr as *mut _, a.as_f32x4(), k) @@ -16271,6 +17357,7 @@ pub unsafe fn _mm_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask8, a: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compressstoreu_pd) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompresspd))] pub unsafe fn _mm512_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a: __m512d) { vcompressstorepd(base_addr as *mut _, a.as_f64x8(), k) @@ -16281,6 +17368,7 @@ pub unsafe fn _mm512_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compressstoreu_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompresspd))] pub unsafe fn _mm256_mask_compressstoreu_pd(base_addr: *mut u8, 
k: __mmask8, a: __m256d) { vcompressstorepd256(base_addr as *mut _, a.as_f64x4(), k) @@ -16291,6 +17379,7 @@ pub unsafe fn _mm256_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compressstoreu_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcompresspd))] pub unsafe fn _mm_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a: __m128d) { vcompressstorepd128(base_addr as *mut _, a.as_f64x2(), k) @@ -16301,6 +17390,7 @@ pub unsafe fn _mm_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expand_epi32&expand=2316) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandd))] pub unsafe fn _mm512_mask_expand_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { transmute(vpexpandd(a.as_i32x16(), src.as_i32x16(), k)) @@ -16311,6 +17401,7 @@ pub unsafe fn _mm512_mask_expand_epi32(src: __m512i, k: __mmask16, a: __m512i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expand_epi32&expand=2317) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandd))] pub unsafe fn _mm512_maskz_expand_epi32(k: __mmask16, a: __m512i) -> __m512i { transmute(vpexpandd( @@ -16325,6 +17416,7 @@ pub unsafe fn _mm512_maskz_expand_epi32(k: __mmask16, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expand_epi32&expand=2314) #[inline] #[target_feature(enable = "avx512f,avx512vl")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandd))] pub unsafe fn _mm256_mask_expand_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { transmute(vpexpandd256(a.as_i32x8(), src.as_i32x8(), k)) @@ -16335,6 +17427,7 @@ pub unsafe fn _mm256_mask_expand_epi32(src: __m256i, k: __mmask8, a: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expand_epi32&expand=2315) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandd))] pub unsafe fn _mm256_maskz_expand_epi32(k: __mmask8, a: __m256i) -> __m256i { transmute(vpexpandd256( @@ -16349,6 +17442,7 @@ pub unsafe fn _mm256_maskz_expand_epi32(k: __mmask8, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expand_epi32&expand=2312) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandd))] pub unsafe fn _mm_mask_expand_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpexpandd128(a.as_i32x4(), src.as_i32x4(), k)) @@ -16359,6 +17453,7 @@ pub unsafe fn _mm_mask_expand_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expand_epi32&expand=2313) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandd))] pub unsafe fn _mm_maskz_expand_epi32(k: __mmask8, a: __m128i) -> __m128i { transmute(vpexpandd128( @@ -16373,6 +17468,7 @@ pub unsafe fn _mm_maskz_expand_epi32(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expand_epi64&expand=2322) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandq))] pub unsafe fn _mm512_mask_expand_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i { transmute(vpexpandq(a.as_i64x8(), src.as_i64x8(), k)) @@ -16383,6 +17479,7 @@ pub unsafe fn _mm512_mask_expand_epi64(src: __m512i, k: __mmask8, a: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expand_epi64&expand=2323) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandq))] pub unsafe fn _mm512_maskz_expand_epi64(k: __mmask8, a: __m512i) -> __m512i { transmute(vpexpandq( @@ -16397,6 +17494,7 @@ pub unsafe fn _mm512_maskz_expand_epi64(k: __mmask8, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expand_epi64&expand=2320) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandq))] pub unsafe fn _mm256_mask_expand_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { transmute(vpexpandq256(a.as_i64x4(), src.as_i64x4(), k)) @@ -16407,6 +17505,7 @@ pub unsafe fn _mm256_mask_expand_epi64(src: __m256i, k: __mmask8, a: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expand_epi64&expand=2321) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandq))] pub unsafe fn _mm256_maskz_expand_epi64(k: __mmask8, a: __m256i) -> __m256i { 
transmute(vpexpandq256( @@ -16421,6 +17520,7 @@ pub unsafe fn _mm256_maskz_expand_epi64(k: __mmask8, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expand_epi64&expand=2318) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandq))] pub unsafe fn _mm_mask_expand_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpexpandq128(a.as_i64x2(), src.as_i64x2(), k)) @@ -16431,6 +17531,7 @@ pub unsafe fn _mm_mask_expand_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expand_epi64&expand=2319) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandq))] pub unsafe fn _mm_maskz_expand_epi64(k: __mmask8, a: __m128i) -> __m128i { transmute(vpexpandq128( @@ -16445,6 +17546,7 @@ pub unsafe fn _mm_maskz_expand_epi64(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expand_ps&expand=2340) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vexpandps))] pub unsafe fn _mm512_mask_expand_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { transmute(vexpandps(a.as_f32x16(), src.as_f32x16(), k)) @@ -16455,6 +17557,7 @@ pub unsafe fn _mm512_mask_expand_ps(src: __m512, k: __mmask16, a: __m512) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expand_ps&expand=2341) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vexpandps))] pub unsafe fn _mm512_maskz_expand_ps(k: __mmask16, a: __m512) -> __m512 { transmute(vexpandps(a.as_f32x16(), _mm512_setzero_ps().as_f32x16(), k)) @@ -16465,6 +17568,7 @@ pub unsafe fn _mm512_maskz_expand_ps(k: __mmask16, a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expand_ps&expand=2338) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vexpandps))] pub unsafe fn _mm256_mask_expand_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { transmute(vexpandps256(a.as_f32x8(), src.as_f32x8(), k)) @@ -16475,6 +17579,7 @@ pub unsafe fn _mm256_mask_expand_ps(src: __m256, k: __mmask8, a: __m256) -> __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expand_ps&expand=2339) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vexpandps))] pub unsafe fn _mm256_maskz_expand_ps(k: __mmask8, a: __m256) -> __m256 { transmute(vexpandps256( @@ -16489,6 +17594,7 @@ pub unsafe fn _mm256_maskz_expand_ps(k: __mmask8, a: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expand_ps&expand=2336) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vexpandps))] pub unsafe fn _mm_mask_expand_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { transmute(vexpandps128(a.as_f32x4(), src.as_f32x4(), k)) @@ -16499,6 +17605,7 @@ pub unsafe fn _mm_mask_expand_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expand_ps&expand=2337) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vexpandps))] pub unsafe fn _mm_maskz_expand_ps(k: __mmask8, a: __m128) -> __m128 { transmute(vexpandps128(a.as_f32x4(), _mm_setzero_ps().as_f32x4(), k)) @@ -16509,6 +17616,7 @@ pub unsafe fn _mm_maskz_expand_ps(k: __mmask8, a: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expand_pd&expand=2334) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vexpandpd))] pub unsafe fn _mm512_mask_expand_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { transmute(vexpandpd(a.as_f64x8(), src.as_f64x8(), k)) @@ -16519,6 +17627,7 @@ pub unsafe fn _mm512_mask_expand_pd(src: __m512d, k: __mmask8, a: __m512d) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expand_pd&expand=2335) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vexpandpd))] pub unsafe fn _mm512_maskz_expand_pd(k: __mmask8, a: __m512d) -> __m512d { transmute(vexpandpd(a.as_f64x8(), _mm512_setzero_pd().as_f64x8(), k)) @@ -16529,6 +17638,7 @@ pub unsafe fn _mm512_maskz_expand_pd(k: __mmask8, a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expand_pd&expand=2332) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vexpandpd))] pub unsafe fn _mm256_mask_expand_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { 
transmute(vexpandpd256(a.as_f64x4(), src.as_f64x4(), k)) @@ -16539,6 +17649,7 @@ pub unsafe fn _mm256_mask_expand_pd(src: __m256d, k: __mmask8, a: __m256d) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expand_pd&expand=2333) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vexpandpd))] pub unsafe fn _mm256_maskz_expand_pd(k: __mmask8, a: __m256d) -> __m256d { transmute(vexpandpd256( @@ -16553,6 +17664,7 @@ pub unsafe fn _mm256_maskz_expand_pd(k: __mmask8, a: __m256d) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expand_pd&expand=2330) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vexpandpd))] pub unsafe fn _mm_mask_expand_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d { transmute(vexpandpd128(a.as_f64x2(), src.as_f64x2(), k)) @@ -16563,6 +17675,7 @@ pub unsafe fn _mm_mask_expand_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expand_pd&expand=2331) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vexpandpd))] pub unsafe fn _mm_maskz_expand_pd(k: __mmask8, a: __m128d) -> __m128d { transmute(vexpandpd128(a.as_f64x2(), _mm_setzero_pd().as_f64x2(), k)) @@ -16573,6 +17686,7 @@ pub unsafe fn _mm_maskz_expand_pd(k: __mmask8, a: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rol_epi32&expand=4685) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_rol_epi32(a: __m512i) -> __m512i { @@ -16587,6 +17701,7 @@ pub unsafe fn _mm512_rol_epi32(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rol_epi32&expand=4683) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_rol_epi32( @@ -16605,6 +17720,7 @@ pub unsafe fn _mm512_mask_rol_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rol_epi32&expand=4684) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_rol_epi32(k: __mmask16, a: __m512i) -> __m512i { @@ -16620,6 +17736,7 @@ pub unsafe fn _mm512_maskz_rol_epi32(k: __mmask16, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rol_epi32&expand=4682) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_rol_epi32(a: __m256i) -> __m256i { @@ -16634,6 +17751,7 @@ pub unsafe fn _mm256_rol_epi32(a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rol_epi32&expand=4680) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn 
_mm256_mask_rol_epi32( @@ -16652,6 +17770,7 @@ pub unsafe fn _mm256_mask_rol_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rol_epi32&expand=4681) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_rol_epi32(k: __mmask8, a: __m256i) -> __m256i { @@ -16667,6 +17786,7 @@ pub unsafe fn _mm256_maskz_rol_epi32(k: __mmask8, a: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rol_epi32&expand=4679) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_rol_epi32(a: __m128i) -> __m128i { @@ -16681,6 +17801,7 @@ pub unsafe fn _mm_rol_epi32(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rol_epi32&expand=4677) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_rol_epi32( @@ -16699,6 +17820,7 @@ pub unsafe fn _mm_mask_rol_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rol_epi32&expand=4678) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_rol_epi32(k: __mmask8, a: __m128i) -> __m128i { @@ -16714,6 +17836,7 @@ pub unsafe fn _mm_maskz_rol_epi32(k: __mmask8, a: __m128i) -> _ 
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_ror_epi32&expand=4721) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_ror_epi32(a: __m512i) -> __m512i { @@ -16728,6 +17851,7 @@ pub unsafe fn _mm512_ror_epi32(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_ror_epi32&expand=4719) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_ror_epi32( @@ -16746,6 +17870,7 @@ pub unsafe fn _mm512_mask_ror_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_ror_epi32&expand=4720) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_ror_epi32(k: __mmask16, a: __m512i) -> __m512i { @@ -16761,6 +17886,7 @@ pub unsafe fn _mm512_maskz_ror_epi32(k: __mmask16, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_ror_epi32&expand=4718) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_ror_epi32(a: __m256i) -> __m256i { @@ -16775,6 +17901,7 @@ pub unsafe fn _mm256_ror_epi32(a: __m256i) -> __m256i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_ror_epi32&expand=4716) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_ror_epi32( @@ -16793,6 +17920,7 @@ pub unsafe fn _mm256_mask_ror_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_ror_epi32&expand=4717) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_ror_epi32(k: __mmask8, a: __m256i) -> __m256i { @@ -16808,6 +17936,7 @@ pub unsafe fn _mm256_maskz_ror_epi32(k: __mmask8, a: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ror_epi32&expand=4715) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_ror_epi32(a: __m128i) -> __m128i { @@ -16822,6 +17951,7 @@ pub unsafe fn _mm_ror_epi32(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_ror_epi32&expand=4713) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_ror_epi32( @@ -16840,6 +17970,7 @@ pub unsafe fn _mm_mask_ror_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_ror_epi32&expand=4714) #[inline] 
#[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprold, IMM8 = 123))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_ror_epi32(k: __mmask8, a: __m128i) -> __m128i { @@ -16855,6 +17986,7 @@ pub unsafe fn _mm_maskz_ror_epi32(k: __mmask8, a: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rol_epi64&expand=4694) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_rol_epi64(a: __m512i) -> __m512i { @@ -16869,6 +18001,7 @@ pub unsafe fn _mm512_rol_epi64(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rol_epi64&expand=4692) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_rol_epi64( @@ -16887,6 +18020,7 @@ pub unsafe fn _mm512_mask_rol_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rol_epi64&expand=4693) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_rol_epi64(k: __mmask8, a: __m512i) -> __m512i { @@ -16902,6 +18036,7 @@ pub unsafe fn _mm512_maskz_rol_epi64(k: __mmask8, a: __m512i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rol_epi64&expand=4691) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue 
= "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_rol_epi64(a: __m256i) -> __m256i { @@ -16916,6 +18051,7 @@ pub unsafe fn _mm256_rol_epi64(a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rol_epi64&expand=4689) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_rol_epi64( @@ -16934,6 +18070,7 @@ pub unsafe fn _mm256_mask_rol_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rol_epi64&expand=4690) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_rol_epi64(k: __mmask8, a: __m256i) -> __m256i { @@ -16949,6 +18086,7 @@ pub unsafe fn _mm256_maskz_rol_epi64(k: __mmask8, a: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rol_epi64&expand=4688) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_rol_epi64(a: __m128i) -> __m128i { @@ -16963,6 +18101,7 @@ pub unsafe fn _mm_rol_epi64(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rol_epi64&expand=4686) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub 
unsafe fn _mm_mask_rol_epi64( @@ -16981,6 +18120,7 @@ pub unsafe fn _mm_mask_rol_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rol_epi64&expand=4687) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_rol_epi64(k: __mmask8, a: __m128i) -> __m128i { @@ -16996,6 +18136,7 @@ pub unsafe fn _mm_maskz_rol_epi64(k: __mmask8, a: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_ror_epi64&expand=4730) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_ror_epi64(a: __m512i) -> __m512i { @@ -17010,6 +18151,7 @@ pub unsafe fn _mm512_ror_epi64(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_ror_epi64&expand=4728) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_ror_epi64( @@ -17028,6 +18170,7 @@ pub unsafe fn _mm512_mask_ror_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_ror_epi64&expand=4729) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_ror_epi64(k: __mmask8, a: __m512i) -> __m512i { @@ -17043,6 +18186,7 @@ pub unsafe fn _mm512_maskz_ror_epi64(k: __mmask8, a: __m512i) - /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_ror_epi64&expand=4727) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_ror_epi64(a: __m256i) -> __m256i { @@ -17057,6 +18201,7 @@ pub unsafe fn _mm256_ror_epi64(a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_ror_epi64&expand=4725) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_ror_epi64( @@ -17075,6 +18220,7 @@ pub unsafe fn _mm256_mask_ror_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_ror_epi64&expand=4726) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_ror_epi64(k: __mmask8, a: __m256i) -> __m256i { @@ -17090,6 +18236,7 @@ pub unsafe fn _mm256_maskz_ror_epi64(k: __mmask8, a: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ror_epi64&expand=4724) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_ror_epi64(a: __m128i) -> __m128i { @@ -17104,6 +18251,7 @@ pub unsafe fn _mm_ror_epi64(a: __m128i) -> __m128i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_ror_epi64&expand=4722) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_ror_epi64( @@ -17122,6 +18270,7 @@ pub unsafe fn _mm_mask_ror_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_ror_epi64&expand=4723) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_ror_epi64(k: __mmask8, a: __m128i) -> __m128i { @@ -17137,6 +18286,7 @@ pub unsafe fn _mm_maskz_ror_epi64(k: __mmask8, a: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_slli_epi32&expand=5310) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_slli_epi32(a: __m512i) -> __m512i { @@ -17153,6 +18303,7 @@ pub unsafe fn _mm512_slli_epi32(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_slli_epi32&expand=5308) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_slli_epi32( @@ -17174,6 +18325,7 @@ pub unsafe fn _mm512_mask_slli_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_slli_epi32&expand=5309) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_slli_epi32(k: __mmask16, a: __m512i) -> __m512i { @@ -17192,6 +18344,7 @@ pub unsafe fn _mm512_maskz_slli_epi32(k: __mmask16, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_slli_epi32&expand=5305) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_slli_epi32( @@ -17213,6 +18366,7 @@ pub unsafe fn _mm256_mask_slli_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_slli_epi32&expand=5306) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_slli_epi32(k: __mmask8, a: __m256i) -> __m256i { @@ -17231,6 +18385,7 @@ pub unsafe fn _mm256_maskz_slli_epi32(k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_slli_epi32&expand=5302) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_slli_epi32( @@ -17252,6 +18407,7 @@ pub unsafe fn _mm_mask_slli_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_slli_epi32&expand=5303) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = 
"111137")] #[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_slli_epi32(k: __mmask8, a: __m128i) -> __m128i { @@ -17270,6 +18426,7 @@ pub unsafe fn _mm_maskz_slli_epi32(k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srli_epi32&expand=5522) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_srli_epi32(a: __m512i) -> __m512i { @@ -17286,6 +18443,7 @@ pub unsafe fn _mm512_srli_epi32(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srli_epi32&expand=5520) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_srli_epi32( @@ -17307,6 +18465,7 @@ pub unsafe fn _mm512_mask_srli_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srli_epi32&expand=5521) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_srli_epi32(k: __mmask16, a: __m512i) -> __m512i { @@ -17325,6 +18484,7 @@ pub unsafe fn _mm512_maskz_srli_epi32(k: __mmask16, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srli_epi32&expand=5517) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] 
#[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_srli_epi32( @@ -17346,6 +18506,7 @@ pub unsafe fn _mm256_mask_srli_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srli_epi32&expand=5518) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_srli_epi32(k: __mmask8, a: __m256i) -> __m256i { @@ -17364,6 +18525,7 @@ pub unsafe fn _mm256_maskz_srli_epi32(k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srli_epi32&expand=5514) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_srli_epi32( @@ -17385,6 +18547,7 @@ pub unsafe fn _mm_mask_srli_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srli_epi32&expand=5515) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_srli_epi32(k: __mmask8, a: __m128i) -> __m128i { @@ -17403,6 +18566,7 @@ pub unsafe fn _mm_maskz_srli_epi32(k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_slli_epi64&expand=5319) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_slli_epi64(a: __m512i) -> __m512i { @@ -17419,6 +18583,7 @@ 
pub unsafe fn _mm512_slli_epi64(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_slli_epi64&expand=5317) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_slli_epi64( @@ -17440,6 +18605,7 @@ pub unsafe fn _mm512_mask_slli_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_slli_epi64&expand=5318) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_slli_epi64(k: __mmask8, a: __m512i) -> __m512i { @@ -17458,6 +18624,7 @@ pub unsafe fn _mm512_maskz_slli_epi64(k: __mmask8, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_slli_epi64&expand=5314) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_slli_epi64( @@ -17479,6 +18646,7 @@ pub unsafe fn _mm256_mask_slli_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_slli_epi64&expand=5315) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_slli_epi64(k: __mmask8, a: __m256i) -> __m256i { @@ -17497,6 +18665,7 @@ pub unsafe fn _mm256_maskz_slli_epi64(k: __mmask8, a: __m256i) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_slli_epi64&expand=5311) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_slli_epi64( @@ -17518,6 +18687,7 @@ pub unsafe fn _mm_mask_slli_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_slli_epi64&expand=5312) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_slli_epi64(k: __mmask8, a: __m128i) -> __m128i { @@ -17536,6 +18706,7 @@ pub unsafe fn _mm_maskz_slli_epi64(k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srli_epi64&expand=5531) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_srli_epi64(a: __m512i) -> __m512i { @@ -17552,6 +18723,7 @@ pub unsafe fn _mm512_srli_epi64(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srli_epi64&expand=5529) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_srli_epi64( @@ -17573,6 +18745,7 @@ pub unsafe fn _mm512_mask_srli_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srli_epi64&expand=5530) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_srli_epi64(k: __mmask8, a: __m512i) -> __m512i { @@ -17591,6 +18764,7 @@ pub unsafe fn _mm512_maskz_srli_epi64(k: __mmask8, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srli_epi64&expand=5526) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_srli_epi64( @@ -17612,6 +18786,7 @@ pub unsafe fn _mm256_mask_srli_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srli_epi64&expand=5527) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_srli_epi64(k: __mmask8, a: __m256i) -> __m256i { @@ -17630,6 +18805,7 @@ pub unsafe fn _mm256_maskz_srli_epi64(k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srli_epi64&expand=5523) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_srli_epi64( @@ -17651,6 +18827,7 @@ pub unsafe fn _mm_mask_srli_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srli_epi64&expand=5524) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = 
"111137")] #[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_srli_epi64(k: __mmask8, a: __m128i) -> __m128i { @@ -17669,6 +18846,7 @@ pub unsafe fn _mm_maskz_srli_epi64(k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sll_epi32&expand=5280) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpslld))] pub unsafe fn _mm512_sll_epi32(a: __m512i, count: __m128i) -> __m512i { transmute(vpslld(a.as_i32x16(), count.as_i32x4())) @@ -17679,6 +18857,7 @@ pub unsafe fn _mm512_sll_epi32(a: __m512i, count: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sll_epi32&expand=5278) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpslld))] pub unsafe fn _mm512_mask_sll_epi32( src: __m512i, @@ -17695,6 +18874,7 @@ pub unsafe fn _mm512_mask_sll_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sll_epi32&expand=5279) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpslld))] pub unsafe fn _mm512_maskz_sll_epi32(k: __mmask16, a: __m512i, count: __m128i) -> __m512i { let shf = _mm512_sll_epi32(a, count).as_i32x16(); @@ -17707,6 +18887,7 @@ pub unsafe fn _mm512_maskz_sll_epi32(k: __mmask16, a: __m512i, count: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sll_epi32&expand=5275) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpslld))] pub unsafe fn _mm256_mask_sll_epi32( src: __m256i, @@ -17723,6 +18904,7 @@ pub unsafe fn _mm256_mask_sll_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sll_epi32&expand=5276) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpslld))] pub unsafe fn _mm256_maskz_sll_epi32(k: __mmask8, a: __m256i, count: __m128i) -> __m256i { let shf = _mm256_sll_epi32(a, count).as_i32x8(); @@ -17735,6 +18917,7 @@ pub unsafe fn _mm256_maskz_sll_epi32(k: __mmask8, a: __m256i, count: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sll_epi32&expand=5272) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpslld))] pub unsafe fn _mm_mask_sll_epi32(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_sll_epi32(a, count).as_i32x4(); @@ -17746,6 +18929,7 @@ pub unsafe fn _mm_mask_sll_epi32(src: __m128i, k: __mmask8, a: __m128i, count: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sll_epi32&expand=5273) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpslld))] pub unsafe fn _mm_maskz_sll_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_sll_epi32(a, count).as_i32x4(); @@ -17758,6 +18942,7 @@ pub unsafe fn _mm_maskz_sll_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srl_epi32&expand=5492) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vpsrld))] pub unsafe fn _mm512_srl_epi32(a: __m512i, count: __m128i) -> __m512i { transmute(vpsrld(a.as_i32x16(), count.as_i32x4())) @@ -17768,6 +18953,7 @@ pub unsafe fn _mm512_srl_epi32(a: __m512i, count: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srl_epi32&expand=5490) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrld))] pub unsafe fn _mm512_mask_srl_epi32( src: __m512i, @@ -17784,6 +18970,7 @@ pub unsafe fn _mm512_mask_srl_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srl_epi32&expand=5491) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrld))] pub unsafe fn _mm512_maskz_srl_epi32(k: __mmask16, a: __m512i, count: __m128i) -> __m512i { let shf = _mm512_srl_epi32(a, count).as_i32x16(); @@ -17796,6 +18983,7 @@ pub unsafe fn _mm512_maskz_srl_epi32(k: __mmask16, a: __m512i, count: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srl_epi32&expand=5487) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrld))] pub unsafe fn _mm256_mask_srl_epi32( src: __m256i, @@ -17812,6 +19000,7 @@ pub unsafe fn _mm256_mask_srl_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srl_epi32&expand=5488) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrld))] pub unsafe fn _mm256_maskz_srl_epi32(k: __mmask8, a: __m256i, 
count: __m128i) -> __m256i { let shf = _mm256_srl_epi32(a, count).as_i32x8(); @@ -17824,6 +19013,7 @@ pub unsafe fn _mm256_maskz_srl_epi32(k: __mmask8, a: __m256i, count: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srl_epi32&expand=5484) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrld))] pub unsafe fn _mm_mask_srl_epi32(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_srl_epi32(a, count).as_i32x4(); @@ -17835,6 +19025,7 @@ pub unsafe fn _mm_mask_srl_epi32(src: __m128i, k: __mmask8, a: __m128i, count: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srl_epi32&expand=5485) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrld))] pub unsafe fn _mm_maskz_srl_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_srl_epi32(a, count).as_i32x4(); @@ -17847,6 +19038,7 @@ pub unsafe fn _mm_maskz_srl_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sll_epi64&expand=5289) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllq))] pub unsafe fn _mm512_sll_epi64(a: __m512i, count: __m128i) -> __m512i { transmute(vpsllq(a.as_i64x8(), count.as_i64x2())) @@ -17857,6 +19049,7 @@ pub unsafe fn _mm512_sll_epi64(a: __m512i, count: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sll_epi64&expand=5287) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllq))] pub unsafe fn _mm512_mask_sll_epi64( src: __m512i, @@ -17873,6 +19066,7 @@ pub unsafe fn _mm512_mask_sll_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sll_epi64&expand=5288) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllq))] pub unsafe fn _mm512_maskz_sll_epi64(k: __mmask8, a: __m512i, count: __m128i) -> __m512i { let shf = _mm512_sll_epi64(a, count).as_i64x8(); @@ -17885,6 +19079,7 @@ pub unsafe fn _mm512_maskz_sll_epi64(k: __mmask8, a: __m512i, count: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sll_epi64&expand=5284) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllq))] pub unsafe fn _mm256_mask_sll_epi64( src: __m256i, @@ -17901,6 +19096,7 @@ pub unsafe fn _mm256_mask_sll_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sll_epi64&expand=5285) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllq))] pub unsafe fn _mm256_maskz_sll_epi64(k: __mmask8, a: __m256i, count: __m128i) -> __m256i { let shf = _mm256_sll_epi64(a, count).as_i64x4(); @@ -17913,6 +19109,7 @@ pub unsafe fn _mm256_maskz_sll_epi64(k: __mmask8, a: __m256i, count: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sll_epi64&expand=5281) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllq))] pub unsafe 
fn _mm_mask_sll_epi64(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_sll_epi64(a, count).as_i64x2(); @@ -17924,6 +19121,7 @@ pub unsafe fn _mm_mask_sll_epi64(src: __m128i, k: __mmask8, a: __m128i, count: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sll_epi64&expand=5282) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllq))] pub unsafe fn _mm_maskz_sll_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_sll_epi64(a, count).as_i64x2(); @@ -17936,6 +19134,7 @@ pub unsafe fn _mm_maskz_sll_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srl_epi64&expand=5501) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlq))] pub unsafe fn _mm512_srl_epi64(a: __m512i, count: __m128i) -> __m512i { transmute(vpsrlq(a.as_i64x8(), count.as_i64x2())) @@ -17946,6 +19145,7 @@ pub unsafe fn _mm512_srl_epi64(a: __m512i, count: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srl_epi64&expand=5499) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlq))] pub unsafe fn _mm512_mask_srl_epi64( src: __m512i, @@ -17962,6 +19162,7 @@ pub unsafe fn _mm512_mask_srl_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srl_epi64&expand=5500) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlq))] pub unsafe fn 
_mm512_maskz_srl_epi64(k: __mmask8, a: __m512i, count: __m128i) -> __m512i { let shf = _mm512_srl_epi64(a, count).as_i64x8(); @@ -17974,6 +19175,7 @@ pub unsafe fn _mm512_maskz_srl_epi64(k: __mmask8, a: __m512i, count: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srl_epi64&expand=5496) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlq))] pub unsafe fn _mm256_mask_srl_epi64( src: __m256i, @@ -17990,6 +19192,7 @@ pub unsafe fn _mm256_mask_srl_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srl_epi64&expand=5497) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlq))] pub unsafe fn _mm256_maskz_srl_epi64(k: __mmask8, a: __m256i, count: __m128i) -> __m256i { let shf = _mm256_srl_epi64(a, count).as_i64x4(); @@ -18002,6 +19205,7 @@ pub unsafe fn _mm256_maskz_srl_epi64(k: __mmask8, a: __m256i, count: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srl_epi64&expand=5493) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlq))] pub unsafe fn _mm_mask_srl_epi64(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_srl_epi64(a, count).as_i64x2(); @@ -18013,6 +19217,7 @@ pub unsafe fn _mm_mask_srl_epi64(src: __m128i, k: __mmask8, a: __m128i, count: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srl_epi64&expand=5494) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vpsrlq))] pub unsafe fn _mm_maskz_srl_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_srl_epi64(a, count).as_i64x2(); @@ -18025,6 +19230,7 @@ pub unsafe fn _mm_maskz_srl_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sra_epi32&expand=5407) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrad))] pub unsafe fn _mm512_sra_epi32(a: __m512i, count: __m128i) -> __m512i { transmute(vpsrad(a.as_i32x16(), count.as_i32x4())) @@ -18035,6 +19241,7 @@ pub unsafe fn _mm512_sra_epi32(a: __m512i, count: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sra_epi32&expand=5405) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrad))] pub unsafe fn _mm512_mask_sra_epi32( src: __m512i, @@ -18051,6 +19258,7 @@ pub unsafe fn _mm512_mask_sra_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sra_epi32&expand=5406) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrad))] pub unsafe fn _mm512_maskz_sra_epi32(k: __mmask16, a: __m512i, count: __m128i) -> __m512i { let shf = _mm512_sra_epi32(a, count).as_i32x16(); @@ -18063,6 +19271,7 @@ pub unsafe fn _mm512_maskz_sra_epi32(k: __mmask16, a: __m512i, count: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sra_epi32&expand=5402) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vpsrad))] pub unsafe fn _mm256_mask_sra_epi32( src: __m256i, @@ -18079,6 +19288,7 @@ pub unsafe fn _mm256_mask_sra_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sra_epi32&expand=5403) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrad))] pub unsafe fn _mm256_maskz_sra_epi32(k: __mmask8, a: __m256i, count: __m128i) -> __m256i { let shf = _mm256_sra_epi32(a, count).as_i32x8(); @@ -18091,6 +19301,7 @@ pub unsafe fn _mm256_maskz_sra_epi32(k: __mmask8, a: __m256i, count: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sra_epi32&expand=5399) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrad))] pub unsafe fn _mm_mask_sra_epi32(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_sra_epi32(a, count).as_i32x4(); @@ -18102,6 +19313,7 @@ pub unsafe fn _mm_mask_sra_epi32(src: __m128i, k: __mmask8, a: __m128i, count: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sra_epi32&expand=5400) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrad))] pub unsafe fn _mm_maskz_sra_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_sra_epi32(a, count).as_i32x4(); @@ -18114,6 +19326,7 @@ pub unsafe fn _mm_maskz_sra_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sra_epi64&expand=5416) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq))] pub unsafe fn _mm512_sra_epi64(a: __m512i, count: __m128i) -> __m512i { transmute(vpsraq(a.as_i64x8(), count.as_i64x2())) @@ -18124,6 +19337,7 @@ pub unsafe fn _mm512_sra_epi64(a: __m512i, count: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sra_epi64&expand=5414) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq))] pub unsafe fn _mm512_mask_sra_epi64( src: __m512i, @@ -18140,6 +19354,7 @@ pub unsafe fn _mm512_mask_sra_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sra_epi64&expand=5415) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq))] pub unsafe fn _mm512_maskz_sra_epi64(k: __mmask8, a: __m512i, count: __m128i) -> __m512i { let shf = _mm512_sra_epi64(a, count).as_i64x8(); @@ -18152,6 +19367,7 @@ pub unsafe fn _mm512_maskz_sra_epi64(k: __mmask8, a: __m512i, count: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sra_epi64&expand=5413) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq))] pub unsafe fn _mm256_sra_epi64(a: __m256i, count: __m128i) -> __m256i { transmute(vpsraq256(a.as_i64x4(), count.as_i64x2())) @@ -18162,6 +19378,7 @@ pub unsafe fn _mm256_sra_epi64(a: __m256i, count: __m128i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sra_epi64&expand=5411) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq))] pub unsafe fn _mm256_mask_sra_epi64( src: __m256i, @@ -18178,6 +19395,7 @@ pub unsafe fn _mm256_mask_sra_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sra_epi64&expand=5412) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq))] pub unsafe fn _mm256_maskz_sra_epi64(k: __mmask8, a: __m256i, count: __m128i) -> __m256i { let shf = _mm256_sra_epi64(a, count).as_i64x4(); @@ -18190,6 +19408,7 @@ pub unsafe fn _mm256_maskz_sra_epi64(k: __mmask8, a: __m256i, count: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sra_epi64&expand=5410) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq))] pub unsafe fn _mm_sra_epi64(a: __m128i, count: __m128i) -> __m128i { transmute(vpsraq128(a.as_i64x2(), count.as_i64x2())) @@ -18200,6 +19419,7 @@ pub unsafe fn _mm_sra_epi64(a: __m128i, count: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sra_epi64&expand=5408) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq))] pub unsafe fn _mm_mask_sra_epi64(src: __m128i, k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_sra_epi64(a, count).as_i64x2(); @@ -18211,6 +19431,7 @@ pub unsafe fn _mm_mask_sra_epi64(src: __m128i, k: __mmask8, a: __m128i, count: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sra_epi64&expand=5409) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq))] pub unsafe fn _mm_maskz_sra_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_sra_epi64(a, count).as_i64x2(); @@ -18223,6 +19444,7 @@ pub unsafe fn _mm_maskz_sra_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srai_epi32&expand=5436) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_srai_epi32(a: __m512i) -> __m512i { @@ -18235,6 +19457,7 @@ pub unsafe fn _mm512_srai_epi32(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srai_epi32&expand=5434) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_srai_epi32( @@ -18252,6 +19475,7 @@ pub unsafe fn _mm512_mask_srai_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srai_epi32&expand=5435) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_srai_epi32(k: __mmask16, a: __m512i) -> __m512i { @@ -18266,6 +19490,7 @@ pub unsafe fn _mm512_maskz_srai_epi32(k: __mmask16, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srai_epi32&expand=5431) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_srai_epi32( @@ -18282,6 +19507,7 @@ pub unsafe fn _mm256_mask_srai_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srai_epi32&expand=5432) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_srai_epi32(k: __mmask8, a: __m256i) -> __m256i { @@ -18295,6 +19521,7 @@ pub unsafe fn _mm256_maskz_srai_epi32(k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srai_epi32&expand=5428) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_srai_epi32( @@ -18311,6 +19538,7 @@ pub unsafe fn _mm_mask_srai_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srai_epi32&expand=5429) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_srai_epi32(k: __mmask8, a: __m128i) -> __m128i { @@ -18324,6 +19552,7 @@ pub unsafe fn _mm_maskz_srai_epi32(k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srai_epi64&expand=5445) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn 
_mm512_srai_epi64(a: __m512i) -> __m512i { @@ -18336,6 +19565,7 @@ pub unsafe fn _mm512_srai_epi64(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srai_epi64&expand=5443) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_srai_epi64( @@ -18353,6 +19583,7 @@ pub unsafe fn _mm512_mask_srai_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srai_epi64&expand=5444) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_srai_epi64(k: __mmask8, a: __m512i) -> __m512i { @@ -18367,6 +19598,7 @@ pub unsafe fn _mm512_maskz_srai_epi64(k: __mmask8, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srai_epi64&expand=5442) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_srai_epi64(a: __m256i) -> __m256i { @@ -18379,6 +19611,7 @@ pub unsafe fn _mm256_srai_epi64(a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srai_epi64&expand=5440) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_srai_epi64( @@ -18396,6 +19629,7 @@ pub unsafe fn _mm256_mask_srai_epi64( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srai_epi64&expand=5441) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_srai_epi64(k: __mmask8, a: __m256i) -> __m256i { @@ -18410,6 +19644,7 @@ pub unsafe fn _mm256_maskz_srai_epi64(k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srai_epi64&expand=5439) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_srai_epi64(a: __m128i) -> __m128i { @@ -18422,6 +19657,7 @@ pub unsafe fn _mm_srai_epi64(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srai_epi64&expand=5437) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_srai_epi64( @@ -18439,6 +19675,7 @@ pub unsafe fn _mm_mask_srai_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srai_epi64&expand=5438) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_srai_epi64(k: __mmask8, a: __m128i) -> __m128i { @@ -18453,6 +19690,7 @@ pub unsafe fn _mm_maskz_srai_epi64(k: __mmask8, a: __m128i) -> /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srav_epi32&expand=5465) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravd))] pub unsafe fn _mm512_srav_epi32(a: __m512i, count: __m512i) -> __m512i { transmute(vpsravd(a.as_i32x16(), count.as_i32x16())) @@ -18463,6 +19701,7 @@ pub unsafe fn _mm512_srav_epi32(a: __m512i, count: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srav_epi32&expand=5463) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravd))] pub unsafe fn _mm512_mask_srav_epi32( src: __m512i, @@ -18479,6 +19718,7 @@ pub unsafe fn _mm512_mask_srav_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srav_epi32&expand=5464) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravd))] pub unsafe fn _mm512_maskz_srav_epi32(k: __mmask16, a: __m512i, count: __m512i) -> __m512i { let shf = _mm512_srav_epi32(a, count).as_i32x16(); @@ -18491,6 +19731,7 @@ pub unsafe fn _mm512_maskz_srav_epi32(k: __mmask16, a: __m512i, count: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srav_epi32&expand=5460) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravd))] pub unsafe fn _mm256_mask_srav_epi32( src: __m256i, @@ -18507,6 +19748,7 @@ pub unsafe fn _mm256_mask_srav_epi32( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srav_epi32&expand=5461) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravd))] pub unsafe fn _mm256_maskz_srav_epi32(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { let shf = _mm256_srav_epi32(a, count).as_i32x8(); @@ -18519,6 +19761,7 @@ pub unsafe fn _mm256_maskz_srav_epi32(k: __mmask8, a: __m256i, count: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srav_epi32&expand=5457) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravd))] pub unsafe fn _mm_mask_srav_epi32( src: __m128i, @@ -18535,6 +19778,7 @@ pub unsafe fn _mm_mask_srav_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srav_epi32&expand=5458) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravd))] pub unsafe fn _mm_maskz_srav_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_srav_epi32(a, count).as_i32x4(); @@ -18547,6 +19791,7 @@ pub unsafe fn _mm_maskz_srav_epi32(k: __mmask8, a: __m128i, count: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srav_epi64&expand=5474) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravq))] pub unsafe fn _mm512_srav_epi64(a: __m512i, count: __m512i) -> __m512i { transmute(vpsravq(a.as_i64x8(), count.as_i64x8())) @@ -18557,6 +19802,7 @@ pub unsafe fn _mm512_srav_epi64(a: __m512i, count: __m512i) -> __m512i { 
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srav_epi64&expand=5472) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravq))] pub unsafe fn _mm512_mask_srav_epi64( src: __m512i, @@ -18573,6 +19819,7 @@ pub unsafe fn _mm512_mask_srav_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srav_epi64&expand=5473) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravq))] pub unsafe fn _mm512_maskz_srav_epi64(k: __mmask8, a: __m512i, count: __m512i) -> __m512i { let shf = _mm512_srav_epi64(a, count).as_i64x8(); @@ -18585,6 +19832,7 @@ pub unsafe fn _mm512_maskz_srav_epi64(k: __mmask8, a: __m512i, count: __m512i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srav_epi64&expand=5471) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravq))] pub unsafe fn _mm256_srav_epi64(a: __m256i, count: __m256i) -> __m256i { transmute(vpsravq256(a.as_i64x4(), count.as_i64x4())) @@ -18595,6 +19843,7 @@ pub unsafe fn _mm256_srav_epi64(a: __m256i, count: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srav_epi64&expand=5469) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravq))] pub unsafe fn _mm256_mask_srav_epi64( src: __m256i, @@ -18611,6 +19860,7 @@ pub unsafe fn _mm256_mask_srav_epi64( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srav_epi64&expand=5470) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravq))] pub unsafe fn _mm256_maskz_srav_epi64(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { let shf = _mm256_srav_epi64(a, count).as_i64x4(); @@ -18623,6 +19873,7 @@ pub unsafe fn _mm256_maskz_srav_epi64(k: __mmask8, a: __m256i, count: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srav_epi64&expand=5468) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravq))] pub unsafe fn _mm_srav_epi64(a: __m128i, count: __m128i) -> __m128i { transmute(vpsravq128(a.as_i64x2(), count.as_i64x2())) @@ -18633,6 +19884,7 @@ pub unsafe fn _mm_srav_epi64(a: __m128i, count: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srav_epi64&expand=5466) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravq))] pub unsafe fn _mm_mask_srav_epi64( src: __m128i, @@ -18649,6 +19901,7 @@ pub unsafe fn _mm_mask_srav_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srav_epi64&expand=5467) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsravq))] pub unsafe fn _mm_maskz_srav_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_srav_epi64(a, count).as_i64x2(); @@ -18661,6 +19914,7 @@ pub unsafe fn _mm_maskz_srav_epi64(k: __mmask8, a: __m128i, count: __m128i) -> 
_ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rolv_epi32&expand=4703) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvd))] pub unsafe fn _mm512_rolv_epi32(a: __m512i, b: __m512i) -> __m512i { transmute(vprolvd(a.as_i32x16(), b.as_i32x16())) @@ -18671,6 +19925,7 @@ pub unsafe fn _mm512_rolv_epi32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rolv_epi32&expand=4701) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvd))] pub unsafe fn _mm512_mask_rolv_epi32( src: __m512i, @@ -18687,6 +19942,7 @@ pub unsafe fn _mm512_mask_rolv_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rolv_epi32&expand=4702) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvd))] pub unsafe fn _mm512_maskz_rolv_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let rol = _mm512_rolv_epi32(a, b).as_i32x16(); @@ -18699,6 +19955,7 @@ pub unsafe fn _mm512_maskz_rolv_epi32(k: __mmask16, a: __m512i, b: __m512i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rolv_epi32&expand=4700) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvd))] pub unsafe fn _mm256_rolv_epi32(a: __m256i, b: __m256i) -> __m256i { transmute(vprolvd256(a.as_i32x8(), b.as_i32x8())) @@ -18709,6 +19966,7 @@ pub unsafe fn _mm256_rolv_epi32(a: __m256i, b: __m256i) -> __m256i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rolv_epi3&expand=4698) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvd))] pub unsafe fn _mm256_mask_rolv_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let rol = _mm256_rolv_epi32(a, b).as_i32x8(); @@ -18720,6 +19978,7 @@ pub unsafe fn _mm256_mask_rolv_epi32(src: __m256i, k: __mmask8, a: __m256i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rolv_epi32&expand=4699) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvd))] pub unsafe fn _mm256_maskz_rolv_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let rol = _mm256_rolv_epi32(a, b).as_i32x8(); @@ -18732,6 +19991,7 @@ pub unsafe fn _mm256_maskz_rolv_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rolv_epi32&expand=4697) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvd))] pub unsafe fn _mm_rolv_epi32(a: __m128i, b: __m128i) -> __m128i { transmute(vprolvd128(a.as_i32x4(), b.as_i32x4())) @@ -18742,6 +20002,7 @@ pub unsafe fn _mm_rolv_epi32(a: __m128i, b: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rolv_epi32&expand=4695) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvd))] pub unsafe fn _mm_mask_rolv_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let rol = 
_mm_rolv_epi32(a, b).as_i32x4(); @@ -18753,6 +20014,7 @@ pub unsafe fn _mm_mask_rolv_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rolv_epi32&expand=4696) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvd))] pub unsafe fn _mm_maskz_rolv_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let rol = _mm_rolv_epi32(a, b).as_i32x4(); @@ -18765,6 +20027,7 @@ pub unsafe fn _mm_maskz_rolv_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rorv_epi32&expand=4739) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvd))] pub unsafe fn _mm512_rorv_epi32(a: __m512i, b: __m512i) -> __m512i { transmute(vprorvd(a.as_i32x16(), b.as_i32x16())) @@ -18775,6 +20038,7 @@ pub unsafe fn _mm512_rorv_epi32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rorv_epi32&expand=4737) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvd))] pub unsafe fn _mm512_mask_rorv_epi32( src: __m512i, @@ -18791,6 +20055,7 @@ pub unsafe fn _mm512_mask_rorv_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rorv_epi32&expand=4738) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvd))] pub unsafe fn _mm512_maskz_rorv_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let ror = _mm512_rorv_epi32(a, 
b).as_i32x16(); @@ -18803,6 +20068,7 @@ pub unsafe fn _mm512_maskz_rorv_epi32(k: __mmask16, a: __m512i, b: __m512i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rorv_epi32&expand=4736) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvd))] pub unsafe fn _mm256_rorv_epi32(a: __m256i, b: __m256i) -> __m256i { transmute(vprorvd256(a.as_i32x8(), b.as_i32x8())) @@ -18813,6 +20079,7 @@ pub unsafe fn _mm256_rorv_epi32(a: __m256i, b: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rorv_epi32&expand=4734) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvd))] pub unsafe fn _mm256_mask_rorv_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let ror = _mm256_rorv_epi32(a, b).as_i32x8(); @@ -18824,6 +20091,7 @@ pub unsafe fn _mm256_mask_rorv_epi32(src: __m256i, k: __mmask8, a: __m256i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rorv_epi32&expand=4735) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvd))] pub unsafe fn _mm256_maskz_rorv_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let ror = _mm256_rorv_epi32(a, b).as_i32x8(); @@ -18836,6 +20104,7 @@ pub unsafe fn _mm256_maskz_rorv_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rorv_epi32&expand=4733) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vprorvd))] pub unsafe fn _mm_rorv_epi32(a: __m128i, b: __m128i) -> __m128i { transmute(vprorvd128(a.as_i32x4(), b.as_i32x4())) @@ -18846,6 +20115,7 @@ pub unsafe fn _mm_rorv_epi32(a: __m128i, b: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rorv_epi32&expand=4731) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvd))] pub unsafe fn _mm_mask_rorv_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let ror = _mm_rorv_epi32(a, b).as_i32x4(); @@ -18857,6 +20127,7 @@ pub unsafe fn _mm_mask_rorv_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rorv_epi32&expand=4732) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvd))] pub unsafe fn _mm_maskz_rorv_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let ror = _mm_rorv_epi32(a, b).as_i32x4(); @@ -18869,6 +20140,7 @@ pub unsafe fn _mm_maskz_rorv_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rolv_epi64&expand=4712) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvq))] pub unsafe fn _mm512_rolv_epi64(a: __m512i, b: __m512i) -> __m512i { transmute(vprolvq(a.as_i64x8(), b.as_i64x8())) @@ -18879,6 +20151,7 @@ pub unsafe fn _mm512_rolv_epi64(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rolv_epi64&expand=4710) #[inline] #[target_feature(enable = "avx512f")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvq))] pub unsafe fn _mm512_mask_rolv_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let rol = _mm512_rolv_epi64(a, b).as_i64x8(); @@ -18890,6 +20163,7 @@ pub unsafe fn _mm512_mask_rolv_epi64(src: __m512i, k: __mmask8, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rolv_epi64&expand=4711) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvq))] pub unsafe fn _mm512_maskz_rolv_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let rol = _mm512_rolv_epi64(a, b).as_i64x8(); @@ -18902,6 +20176,7 @@ pub unsafe fn _mm512_maskz_rolv_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rolv_epi64&expand=4709) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvq))] pub unsafe fn _mm256_rolv_epi64(a: __m256i, b: __m256i) -> __m256i { transmute(vprolvq256(a.as_i64x4(), b.as_i64x4())) @@ -18912,6 +20187,7 @@ pub unsafe fn _mm256_rolv_epi64(a: __m256i, b: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rolv_epi64&expand=4707) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvq))] pub unsafe fn _mm256_mask_rolv_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let rol = _mm256_rolv_epi64(a, b).as_i64x4(); @@ -18923,6 +20199,7 @@ pub unsafe fn _mm256_mask_rolv_epi64(src: __m256i, k: __mmask8, a: __m256i, b: _ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rolv_epi64&expand=4708) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvq))] pub unsafe fn _mm256_maskz_rolv_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let rol = _mm256_rolv_epi64(a, b).as_i64x4(); @@ -18935,6 +20212,7 @@ pub unsafe fn _mm256_maskz_rolv_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rolv_epi64&expand=4706) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvq))] pub unsafe fn _mm_rolv_epi64(a: __m128i, b: __m128i) -> __m128i { transmute(vprolvq128(a.as_i64x2(), b.as_i64x2())) @@ -18945,6 +20223,7 @@ pub unsafe fn _mm_rolv_epi64(a: __m128i, b: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rolv_epi64&expand=4704) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvq))] pub unsafe fn _mm_mask_rolv_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let rol = _mm_rolv_epi64(a, b).as_i64x2(); @@ -18956,6 +20235,7 @@ pub unsafe fn _mm_mask_rolv_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rolv_epi64&expand=4705) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprolvq))] pub unsafe fn _mm_maskz_rolv_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let rol = _mm_rolv_epi64(a, 
b).as_i64x2(); @@ -18968,6 +20248,7 @@ pub unsafe fn _mm_maskz_rolv_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rorv_epi64&expand=4748) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvq))] pub unsafe fn _mm512_rorv_epi64(a: __m512i, b: __m512i) -> __m512i { transmute(vprorvq(a.as_i64x8(), b.as_i64x8())) @@ -18978,6 +20259,7 @@ pub unsafe fn _mm512_rorv_epi64(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rorv_epi64&expand=4746) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvq))] pub unsafe fn _mm512_mask_rorv_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let ror = _mm512_rorv_epi64(a, b).as_i64x8(); @@ -18989,6 +20271,7 @@ pub unsafe fn _mm512_mask_rorv_epi64(src: __m512i, k: __mmask8, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rorv_epi64&expand=4747) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvq))] pub unsafe fn _mm512_maskz_rorv_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let ror = _mm512_rorv_epi64(a, b).as_i64x8(); @@ -19001,6 +20284,7 @@ pub unsafe fn _mm512_maskz_rorv_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rorv_epi64&expand=4745) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvq))] pub 
unsafe fn _mm256_rorv_epi64(a: __m256i, b: __m256i) -> __m256i { transmute(vprorvq256(a.as_i64x4(), b.as_i64x4())) @@ -19011,6 +20295,7 @@ pub unsafe fn _mm256_rorv_epi64(a: __m256i, b: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rorv_epi64&expand=4743) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvq))] pub unsafe fn _mm256_mask_rorv_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let ror = _mm256_rorv_epi64(a, b).as_i64x4(); @@ -19022,6 +20307,7 @@ pub unsafe fn _mm256_mask_rorv_epi64(src: __m256i, k: __mmask8, a: __m256i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rorv_epi64&expand=4744) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvq))] pub unsafe fn _mm256_maskz_rorv_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let ror = _mm256_rorv_epi64(a, b).as_i64x4(); @@ -19034,6 +20320,7 @@ pub unsafe fn _mm256_maskz_rorv_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rorv_epi64&expand=4742) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvq))] pub unsafe fn _mm_rorv_epi64(a: __m128i, b: __m128i) -> __m128i { transmute(vprorvq128(a.as_i64x2(), b.as_i64x2())) @@ -19044,6 +20331,7 @@ pub unsafe fn _mm_rorv_epi64(a: __m128i, b: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rorv_epi64&expand=4740) #[inline] #[target_feature(enable = 
"avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvq))] pub unsafe fn _mm_mask_rorv_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let ror = _mm_rorv_epi64(a, b).as_i64x2(); @@ -19055,6 +20343,7 @@ pub unsafe fn _mm_mask_rorv_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rorv_epi64&expand=4741) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vprorvq))] pub unsafe fn _mm_maskz_rorv_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let ror = _mm_rorv_epi64(a, b).as_i64x2(); @@ -19067,6 +20356,7 @@ pub unsafe fn _mm_maskz_rorv_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sllv_epi32&expand=5342) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvd))] pub unsafe fn _mm512_sllv_epi32(a: __m512i, count: __m512i) -> __m512i { transmute(vpsllvd(a.as_i32x16(), count.as_i32x16())) @@ -19077,6 +20367,7 @@ pub unsafe fn _mm512_sllv_epi32(a: __m512i, count: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sllv_epi32&expand=5340) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvd))] pub unsafe fn _mm512_mask_sllv_epi32( src: __m512i, @@ -19093,6 +20384,7 @@ pub unsafe fn _mm512_mask_sllv_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sllv_epi32&expand=5341) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvd))] pub unsafe fn _mm512_maskz_sllv_epi32(k: __mmask16, a: __m512i, count: __m512i) -> __m512i { let shf = _mm512_sllv_epi32(a, count).as_i32x16(); @@ -19105,6 +20397,7 @@ pub unsafe fn _mm512_maskz_sllv_epi32(k: __mmask16, a: __m512i, count: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sllv_epi32&expand=5337) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvd))] pub unsafe fn _mm256_mask_sllv_epi32( src: __m256i, @@ -19121,6 +20414,7 @@ pub unsafe fn _mm256_mask_sllv_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sllv_epi32&expand=5338) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvd))] pub unsafe fn _mm256_maskz_sllv_epi32(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { let shf = _mm256_sllv_epi32(a, count).as_i32x8(); @@ -19133,6 +20427,7 @@ pub unsafe fn _mm256_maskz_sllv_epi32(k: __mmask8, a: __m256i, count: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sllv_epi32&expand=5334) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvd))] pub unsafe fn _mm_mask_sllv_epi32( src: __m128i, @@ -19149,6 +20444,7 @@ pub unsafe fn _mm_mask_sllv_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sllv_epi32&expand=5335) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvd))] pub unsafe fn _mm_maskz_sllv_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_sllv_epi32(a, count).as_i32x4(); @@ -19161,6 +20457,7 @@ pub unsafe fn _mm_maskz_sllv_epi32(k: __mmask8, a: __m128i, count: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srlv_epi32&expand=5554) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvd))] pub unsafe fn _mm512_srlv_epi32(a: __m512i, count: __m512i) -> __m512i { transmute(vpsrlvd(a.as_i32x16(), count.as_i32x16())) @@ -19171,6 +20468,7 @@ pub unsafe fn _mm512_srlv_epi32(a: __m512i, count: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srlv_epi32&expand=5552) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvd))] pub unsafe fn _mm512_mask_srlv_epi32( src: __m512i, @@ -19187,6 +20485,7 @@ pub unsafe fn _mm512_mask_srlv_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srlv_epi32&expand=5553) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvd))] pub unsafe fn _mm512_maskz_srlv_epi32(k: __mmask16, a: __m512i, count: __m512i) -> __m512i { let shf = _mm512_srlv_epi32(a, count).as_i32x16(); @@ -19199,6 +20498,7 @@ pub unsafe fn _mm512_maskz_srlv_epi32(k: __mmask16, a: __m512i, count: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srlv_epi32&expand=5549) #[inline] #[target_feature(enable = "avx512f,avx512vl")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvd))] pub unsafe fn _mm256_mask_srlv_epi32( src: __m256i, @@ -19215,6 +20515,7 @@ pub unsafe fn _mm256_mask_srlv_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srlv_epi32&expand=5550) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvd))] pub unsafe fn _mm256_maskz_srlv_epi32(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { let shf = _mm256_srlv_epi32(a, count).as_i32x8(); @@ -19227,6 +20528,7 @@ pub unsafe fn _mm256_maskz_srlv_epi32(k: __mmask8, a: __m256i, count: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srlv_epi32&expand=5546) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvd))] pub unsafe fn _mm_mask_srlv_epi32( src: __m128i, @@ -19243,6 +20545,7 @@ pub unsafe fn _mm_mask_srlv_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srlv_epi32&expand=5547) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvd))] pub unsafe fn _mm_maskz_srlv_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_srlv_epi32(a, count).as_i32x4(); @@ -19255,6 +20558,7 @@ pub unsafe fn _mm_maskz_srlv_epi32(k: __mmask8, a: __m128i, count: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sllv_epi64&expand=5351) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpsllvq))] pub unsafe fn _mm512_sllv_epi64(a: __m512i, count: __m512i) -> __m512i { transmute(vpsllvq(a.as_i64x8(), count.as_i64x8())) @@ -19265,6 +20569,7 @@ pub unsafe fn _mm512_sllv_epi64(a: __m512i, count: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sllv_epi64&expand=5349) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvq))] pub unsafe fn _mm512_mask_sllv_epi64( src: __m512i, @@ -19281,6 +20586,7 @@ pub unsafe fn _mm512_mask_sllv_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sllv_epi64&expand=5350) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvq))] pub unsafe fn _mm512_maskz_sllv_epi64(k: __mmask8, a: __m512i, count: __m512i) -> __m512i { let shf = _mm512_sllv_epi64(a, count).as_i64x8(); @@ -19293,6 +20599,7 @@ pub unsafe fn _mm512_maskz_sllv_epi64(k: __mmask8, a: __m512i, count: __m512i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sllv_epi64&expand=5346) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvq))] pub unsafe fn _mm256_mask_sllv_epi64( src: __m256i, @@ -19309,6 +20616,7 @@ pub unsafe fn _mm256_mask_sllv_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sllv_epi64&expand=5347) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvq))] pub unsafe fn _mm256_maskz_sllv_epi64(k: __mmask8, a: __m256i, count: __m256i) -> 
__m256i { let shf = _mm256_sllv_epi64(a, count).as_i64x4(); @@ -19321,6 +20629,7 @@ pub unsafe fn _mm256_maskz_sllv_epi64(k: __mmask8, a: __m256i, count: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sllv_epi64&expand=5343) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvq))] pub unsafe fn _mm_mask_sllv_epi64( src: __m128i, @@ -19337,6 +20646,7 @@ pub unsafe fn _mm_mask_sllv_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sllv_epi64&expand=5344) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsllvq))] pub unsafe fn _mm_maskz_sllv_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_sllv_epi64(a, count).as_i64x2(); @@ -19349,6 +20659,7 @@ pub unsafe fn _mm_maskz_sllv_epi64(k: __mmask8, a: __m128i, count: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srlv_epi64&expand=5563) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvq))] pub unsafe fn _mm512_srlv_epi64(a: __m512i, count: __m512i) -> __m512i { transmute(vpsrlvq(a.as_i64x8(), count.as_i64x8())) @@ -19359,6 +20670,7 @@ pub unsafe fn _mm512_srlv_epi64(a: __m512i, count: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srlv_epi64&expand=5561) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvq))] pub unsafe fn _mm512_mask_srlv_epi64( src: __m512i, @@ -19375,6 +20687,7 @@ pub 
unsafe fn _mm512_mask_srlv_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srlv_epi64&expand=5562) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvq))] pub unsafe fn _mm512_maskz_srlv_epi64(k: __mmask8, a: __m512i, count: __m512i) -> __m512i { let shf = _mm512_srlv_epi64(a, count).as_i64x8(); @@ -19387,6 +20700,7 @@ pub unsafe fn _mm512_maskz_srlv_epi64(k: __mmask8, a: __m512i, count: __m512i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srlv_epi64&expand=5558) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvq))] pub unsafe fn _mm256_mask_srlv_epi64( src: __m256i, @@ -19403,6 +20717,7 @@ pub unsafe fn _mm256_mask_srlv_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srlv_epi64&expand=5559) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvq))] pub unsafe fn _mm256_maskz_srlv_epi64(k: __mmask8, a: __m256i, count: __m256i) -> __m256i { let shf = _mm256_srlv_epi64(a, count).as_i64x4(); @@ -19415,6 +20730,7 @@ pub unsafe fn _mm256_maskz_srlv_epi64(k: __mmask8, a: __m256i, count: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srlv_epi64&expand=5555) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvq))] pub unsafe fn _mm_mask_srlv_epi64( src: __m128i, @@ -19431,6 +20747,7 @@ pub unsafe fn _mm_mask_srlv_epi64( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srlv_epi64&expand=5556) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpsrlvq))] pub unsafe fn _mm_maskz_srlv_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __m128i { let shf = _mm_srlv_epi64(a, count).as_i64x2(); @@ -19443,6 +20760,7 @@ pub unsafe fn _mm_maskz_srlv_epi64(k: __mmask8, a: __m128i, count: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permute_ps&expand=4170) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_permute_ps(a: __m512) -> __m512 { @@ -19476,6 +20794,7 @@ pub unsafe fn _mm512_permute_ps(a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permute_ps&expand=4168) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_permute_ps( @@ -19493,6 +20812,7 @@ pub unsafe fn _mm512_mask_permute_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permute_ps&expand=4169) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_permute_ps(k: __mmask16, a: __m512) -> __m512 { @@ -19507,6 +20827,7 @@ pub unsafe fn _mm512_maskz_permute_ps(k: __mmask16, a: __m512) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permute_ps&expand=4165) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_permute_ps( @@ -19523,6 +20844,7 @@ pub unsafe fn _mm256_mask_permute_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permute_ps&expand=4166) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_permute_ps(k: __mmask8, a: __m256) -> __m256 { @@ -19536,6 +20858,7 @@ pub unsafe fn _mm256_maskz_permute_ps(k: __mmask8, a: __m256) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permute_ps&expand=4162) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_permute_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { @@ -19548,6 +20871,7 @@ pub unsafe fn _mm_mask_permute_ps(src: __m128, k: __mmask8, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permute_ps&expand=4163) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufps, MASK = 0b11_00_01_11))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_permute_ps(k: __mmask8, a: __m128) -> __m128 { @@ -19561,6 +20885,7 @@ pub unsafe fn _mm_maskz_permute_ps(k: __mmask8, a: 
__m128) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permute_pd&expand=4161) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01_10_01))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_permute_pd(a: __m512d) -> __m512d { @@ -19586,6 +20911,7 @@ pub unsafe fn _mm512_permute_pd(a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permute_pd&expand=4159) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01_10_01))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_permute_pd( @@ -19603,6 +20929,7 @@ pub unsafe fn _mm512_mask_permute_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permute_pd&expand=4160) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01_10_01))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_permute_pd(k: __mmask8, a: __m512d) -> __m512d { @@ -19617,6 +20944,7 @@ pub unsafe fn _mm512_maskz_permute_pd(k: __mmask8, a: __m512d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permute_pd&expand=4156) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_permute_pd( @@ -19634,6 +20962,7 @@ pub unsafe fn _mm256_mask_permute_pd( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permute_pd&expand=4157) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 0b11_01))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_permute_pd(k: __mmask8, a: __m256d) -> __m256d { @@ -19648,6 +20977,7 @@ pub unsafe fn _mm256_maskz_permute_pd(k: __mmask8, a: __m256d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permute_pd&expand=4153) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufpd, IMM2 = 0b01))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_permute_pd( @@ -19665,6 +20995,7 @@ pub unsafe fn _mm_mask_permute_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permute_pd&expand=4154) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufpd, IMM2 = 0b01))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_permute_pd(k: __mmask8, a: __m128d) -> __m128d { @@ -19679,6 +21010,7 @@ pub unsafe fn _mm_maskz_permute_pd(k: __mmask8, a: __m128d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutex_epi64&expand=4208) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_permutex_epi64(a: __m512i) -> __m512i { @@ -19704,6 +21036,7 @@ pub unsafe fn _mm512_permutex_epi64(a: __m512i) -> __m512i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutex_epi64&expand=4206) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_permutex_epi64( @@ -19721,6 +21054,7 @@ pub unsafe fn _mm512_mask_permutex_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutex_epi64&expand=4207) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_permutex_epi64(k: __mmask8, a: __m512i) -> __m512i { @@ -19735,6 +21069,7 @@ pub unsafe fn _mm512_maskz_permutex_epi64(k: __mmask8, a: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutex_epi64&expand=4205) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_permutex_epi64(a: __m256i) -> __m256i { @@ -19756,6 +21091,7 @@ pub unsafe fn _mm256_permutex_epi64(a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutex_epi6&expand=4203) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_permutex_epi64( @@ -19773,6 +21109,7 @@ pub unsafe fn 
_mm256_mask_permutex_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutex_epi64&expand=4204) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_permutex_epi64(k: __mmask8, a: __m256i) -> __m256i { @@ -19787,6 +21124,7 @@ pub unsafe fn _mm256_maskz_permutex_epi64(k: __mmask8, a: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutex_pd&expand=4214) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_permutex_pd(a: __m512d) -> __m512d { @@ -19812,6 +21150,7 @@ pub unsafe fn _mm512_permutex_pd(a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutex_pd&expand=4212) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_permutex_pd( @@ -19828,6 +21167,7 @@ pub unsafe fn _mm512_mask_permutex_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutex_pd&expand=4213) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_permutex_pd(k: __mmask8, a: __m512d) -> __m512d { 
@@ -19841,6 +21181,7 @@ pub unsafe fn _mm512_maskz_permutex_pd(k: __mmask8, a: __m512d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutex_pd&expand=4211) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd #[rustc_legacy_const_generics(1)] pub unsafe fn _mm256_permutex_pd(a: __m256d) -> __m256d { @@ -19862,6 +21203,7 @@ pub unsafe fn _mm256_permutex_pd(a: __m256d) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutex_pd&expand=4209) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_permutex_pd( @@ -19879,6 +21221,7 @@ pub unsafe fn _mm256_mask_permutex_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutex_pd&expand=4210) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_permutex_pd(k: __mmask8, a: __m256d) -> __m256d { @@ -19893,6 +21236,7 @@ pub unsafe fn _mm256_maskz_permutex_pd(k: __mmask8, a: __m256d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutevar_epi32&expand=4182) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //should be vpermd pub unsafe fn _mm512_permutevar_epi32(idx: __m512i, a: 
__m512i) -> __m512i { transmute(vpermd(a.as_i32x16(), idx.as_i32x16())) @@ -19903,6 +21247,7 @@ pub unsafe fn _mm512_permutevar_epi32(idx: __m512i, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutevar_epi32&expand=4181) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermd))] pub unsafe fn _mm512_mask_permutevar_epi32( src: __m512i, @@ -19919,6 +21264,7 @@ pub unsafe fn _mm512_mask_permutevar_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutevar_ps&expand=4200) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermilps))] pub unsafe fn _mm512_permutevar_ps(a: __m512, b: __m512i) -> __m512 { transmute(vpermilps(a.as_f32x16(), b.as_i32x16())) @@ -19929,6 +21275,7 @@ pub unsafe fn _mm512_permutevar_ps(a: __m512, b: __m512i) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutevar_ps&expand=4198) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermilps))] pub unsafe fn _mm512_mask_permutevar_ps( src: __m512, @@ -19945,6 +21292,7 @@ pub unsafe fn _mm512_mask_permutevar_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutevar_ps&expand=4199) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermilps))] pub unsafe fn _mm512_maskz_permutevar_ps(k: __mmask16, a: __m512, b: __m512i) -> __m512 { let permute = _mm512_permutevar_ps(a, b).as_f32x16(); @@ -19957,6 +21305,7 @@ pub 
unsafe fn _mm512_maskz_permutevar_ps(k: __mmask16, a: __m512, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm256_mask_permutevar_ps&expand=4195) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermilps))] pub unsafe fn _mm256_mask_permutevar_ps(src: __m256, k: __mmask8, a: __m256, b: __m256i) -> __m256 { let permute = _mm256_permutevar_ps(a, b).as_f32x8(); @@ -19968,6 +21317,7 @@ pub unsafe fn _mm256_mask_permutevar_ps(src: __m256, k: __mmask8, a: __m256, b: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutevar_ps&expand=4196) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermilps))] pub unsafe fn _mm256_maskz_permutevar_ps(k: __mmask8, a: __m256, b: __m256i) -> __m256 { let permute = _mm256_permutevar_ps(a, b).as_f32x8(); @@ -19980,6 +21330,7 @@ pub unsafe fn _mm256_maskz_permutevar_ps(k: __mmask8, a: __m256, b: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutevar_ps&expand=4192) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermilps))] pub unsafe fn _mm_mask_permutevar_ps(src: __m128, k: __mmask8, a: __m128, b: __m128i) -> __m128 { let permute = _mm_permutevar_ps(a, b).as_f32x4(); @@ -19991,6 +21342,7 @@ pub unsafe fn _mm_mask_permutevar_ps(src: __m128, k: __mmask8, a: __m128, b: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutevar_ps&expand=4193) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vpermilps))] pub unsafe fn _mm_maskz_permutevar_ps(k: __mmask8, a: __m128, b: __m128i) -> __m128 { let permute = _mm_permutevar_ps(a, b).as_f32x4(); @@ -20003,6 +21355,7 @@ pub unsafe fn _mm_maskz_permutevar_ps(k: __mmask8, a: __m128, b: __m128i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutevar_pd&expand=4191) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermilpd))] pub unsafe fn _mm512_permutevar_pd(a: __m512d, b: __m512i) -> __m512d { transmute(vpermilpd(a.as_f64x8(), b.as_i64x8())) @@ -20013,6 +21366,7 @@ pub unsafe fn _mm512_permutevar_pd(a: __m512d, b: __m512i) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutevar_pd&expand=4189) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermilpd))] pub unsafe fn _mm512_mask_permutevar_pd( src: __m512d, @@ -20029,6 +21383,7 @@ pub unsafe fn _mm512_mask_permutevar_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutevar_pd&expand=4190) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermilpd))] pub unsafe fn _mm512_maskz_permutevar_pd(k: __mmask8, a: __m512d, b: __m512i) -> __m512d { let permute = _mm512_permutevar_pd(a, b).as_f64x8(); @@ -20041,6 +21396,7 @@ pub unsafe fn _mm512_maskz_permutevar_pd(k: __mmask8, a: __m512d, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutevar_pd&expand=4186) #[inline] #[target_feature(enable = "avx512f,avx512vl")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermilpd))] pub unsafe fn _mm256_mask_permutevar_pd( src: __m256d, @@ -20057,6 +21413,7 @@ pub unsafe fn _mm256_mask_permutevar_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutevar_pd&expand=4187) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermilpd))] pub unsafe fn _mm256_maskz_permutevar_pd(k: __mmask8, a: __m256d, b: __m256i) -> __m256d { let permute = _mm256_permutevar_pd(a, b).as_f64x4(); @@ -20069,6 +21426,7 @@ pub unsafe fn _mm256_maskz_permutevar_pd(k: __mmask8, a: __m256d, b: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutevar_pd&expand=4183) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermilpd))] pub unsafe fn _mm_mask_permutevar_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128i) -> __m128d { let permute = _mm_permutevar_pd(a, b).as_f64x2(); @@ -20080,6 +21438,7 @@ pub unsafe fn _mm_mask_permutevar_pd(src: __m128d, k: __mmask8, a: __m128d, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutevar_pd&expand=4184) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermilpd))] pub unsafe fn _mm_maskz_permutevar_pd(k: __mmask8, a: __m128d, b: __m128i) -> __m128d { let permute = _mm_permutevar_pd(a, b).as_f64x2(); @@ -20092,6 +21451,7 @@ pub unsafe fn _mm_maskz_permutevar_pd(k: __mmask8, a: __m128d, b: __m128i) -> __ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutexvar_epi32&expand=4301) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //should be vpermd pub unsafe fn _mm512_permutexvar_epi32(idx: __m512i, a: __m512i) -> __m512i { transmute(vpermd(a.as_i32x16(), idx.as_i32x16())) @@ -20102,6 +21462,7 @@ pub unsafe fn _mm512_permutexvar_epi32(idx: __m512i, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutexvar_epi32&expand=4299) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermd))] pub unsafe fn _mm512_mask_permutexvar_epi32( src: __m512i, @@ -20118,6 +21479,7 @@ pub unsafe fn _mm512_mask_permutexvar_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutexvar_epi32&expand=4300) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermd))] pub unsafe fn _mm512_maskz_permutexvar_epi32(k: __mmask16, idx: __m512i, a: __m512i) -> __m512i { let permute = _mm512_permutexvar_epi32(idx, a).as_i32x16(); @@ -20130,6 +21492,7 @@ pub unsafe fn _mm512_maskz_permutexvar_epi32(k: __mmask16, idx: __m512i, a: __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutexvar_epi32&expand=4298) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //should be vpermd pub unsafe fn _mm256_permutexvar_epi32(idx: __m256i, a: __m256i) -> __m256i { _mm256_permutevar8x32_epi32(a, idx) // llvm use llvm.x86.avx2.permd @@ 
-20140,6 +21503,7 @@ pub unsafe fn _mm256_permutexvar_epi32(idx: __m256i, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutexvar_epi32&expand=4296) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermd))] pub unsafe fn _mm256_mask_permutexvar_epi32( src: __m256i, @@ -20156,6 +21520,7 @@ pub unsafe fn _mm256_mask_permutexvar_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutexvar_epi32&expand=4297) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermd))] pub unsafe fn _mm256_maskz_permutexvar_epi32(k: __mmask8, idx: __m256i, a: __m256i) -> __m256i { let permute = _mm256_permutexvar_epi32(idx, a).as_i32x8(); @@ -20168,6 +21533,7 @@ pub unsafe fn _mm256_maskz_permutexvar_epi32(k: __mmask8, idx: __m256i, a: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutexvar_epi64&expand=4307) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //should be vpermq pub unsafe fn _mm512_permutexvar_epi64(idx: __m512i, a: __m512i) -> __m512i { transmute(vpermq(a.as_i64x8(), idx.as_i64x8())) @@ -20178,6 +21544,7 @@ pub unsafe fn _mm512_permutexvar_epi64(idx: __m512i, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutexvar_epi64&expand=4305) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermq))] pub unsafe fn _mm512_mask_permutexvar_epi64( src: 
__m512i, @@ -20194,6 +21561,7 @@ pub unsafe fn _mm512_mask_permutexvar_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutexvar_epi64&expand=4306) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermq))] pub unsafe fn _mm512_maskz_permutexvar_epi64(k: __mmask8, idx: __m512i, a: __m512i) -> __m512i { let permute = _mm512_permutexvar_epi64(idx, a).as_i64x8(); @@ -20206,6 +21574,7 @@ pub unsafe fn _mm512_maskz_permutexvar_epi64(k: __mmask8, idx: __m512i, a: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutexvar_epi64&expand=4304) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //should be vpermq pub unsafe fn _mm256_permutexvar_epi64(idx: __m256i, a: __m256i) -> __m256i { transmute(vpermq256(a.as_i64x4(), idx.as_i64x4())) @@ -20216,6 +21585,7 @@ pub unsafe fn _mm256_permutexvar_epi64(idx: __m256i, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutexvar_epi64&expand=4302) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermq))] pub unsafe fn _mm256_mask_permutexvar_epi64( src: __m256i, @@ -20232,6 +21602,7 @@ pub unsafe fn _mm256_mask_permutexvar_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutexvar_epi64&expand=4303) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermq))] pub unsafe fn _mm256_maskz_permutexvar_epi64(k: __mmask8, 
idx: __m256i, a: __m256i) -> __m256i { let permute = _mm256_permutexvar_epi64(idx, a).as_i64x4(); @@ -20244,6 +21615,7 @@ pub unsafe fn _mm256_maskz_permutexvar_epi64(k: __mmask8, idx: __m256i, a: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutevar_ps&expand=4200) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermps))] pub unsafe fn _mm512_permutexvar_ps(idx: __m512i, a: __m512) -> __m512 { transmute(vpermps(a.as_f32x16(), idx.as_i32x16())) @@ -20254,6 +21626,7 @@ pub unsafe fn _mm512_permutexvar_ps(idx: __m512i, a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutexvar_ps&expand=4326) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermps))] pub unsafe fn _mm512_mask_permutexvar_ps( src: __m512, @@ -20270,6 +21643,7 @@ pub unsafe fn _mm512_mask_permutexvar_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutexvar_ps&expand=4327) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermps))] pub unsafe fn _mm512_maskz_permutexvar_ps(k: __mmask16, idx: __m512i, a: __m512) -> __m512 { let permute = _mm512_permutexvar_ps(idx, a).as_f32x16(); @@ -20282,6 +21656,7 @@ pub unsafe fn _mm512_maskz_permutexvar_ps(k: __mmask16, idx: __m512i, a: __m512) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutexvar_ps&expand=4325) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermps))] pub 
unsafe fn _mm256_permutexvar_ps(idx: __m256i, a: __m256) -> __m256 { _mm256_permutevar8x32_ps(a, idx) //llvm.x86.avx2.permps @@ -20292,6 +21667,7 @@ pub unsafe fn _mm256_permutexvar_ps(idx: __m256i, a: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutexvar_ps&expand=4323) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermps))] pub unsafe fn _mm256_mask_permutexvar_ps( src: __m256, @@ -20308,6 +21684,7 @@ pub unsafe fn _mm256_mask_permutexvar_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutexvar_ps&expand=4324) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermps))] pub unsafe fn _mm256_maskz_permutexvar_ps(k: __mmask8, idx: __m256i, a: __m256) -> __m256 { let permute = _mm256_permutexvar_ps(idx, a).as_f32x8(); @@ -20320,6 +21697,7 @@ pub unsafe fn _mm256_maskz_permutexvar_ps(k: __mmask8, idx: __m256i, a: __m256) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutexvar_pd&expand=4322) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermpd))] pub unsafe fn _mm512_permutexvar_pd(idx: __m512i, a: __m512d) -> __m512d { transmute(vpermpd(a.as_f64x8(), idx.as_i64x8())) @@ -20330,6 +21708,7 @@ pub unsafe fn _mm512_permutexvar_pd(idx: __m512i, a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutexvar_pd&expand=4320) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpermpd))] pub unsafe fn _mm512_mask_permutexvar_pd( src: __m512d, @@ -20346,6 +21725,7 @@ pub unsafe fn _mm512_mask_permutexvar_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutexvar_pd&expand=4321) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermpd))] pub unsafe fn _mm512_maskz_permutexvar_pd(k: __mmask8, idx: __m512i, a: __m512d) -> __m512d { let permute = _mm512_permutexvar_pd(idx, a).as_f64x8(); @@ -20358,6 +21738,7 @@ pub unsafe fn _mm512_maskz_permutexvar_pd(k: __mmask8, idx: __m512i, a: __m512d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutexvar_pd&expand=4319) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermpd))] pub unsafe fn _mm256_permutexvar_pd(idx: __m256i, a: __m256d) -> __m256d { transmute(vpermpd256(a.as_f64x4(), idx.as_i64x4())) @@ -20368,6 +21749,7 @@ pub unsafe fn _mm256_permutexvar_pd(idx: __m256i, a: __m256d) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutexvar_pd&expand=4317) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermpd))] pub unsafe fn _mm256_mask_permutexvar_pd( src: __m256d, @@ -20384,6 +21766,7 @@ pub unsafe fn _mm256_mask_permutexvar_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutexvar_pd&expand=4318) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermpd))] pub unsafe fn 
_mm256_maskz_permutexvar_pd(k: __mmask8, idx: __m256i, a: __m256d) -> __m256d { let permute = _mm256_permutexvar_pd(idx, a).as_f64x4(); @@ -20396,6 +21779,7 @@ pub unsafe fn _mm256_maskz_permutexvar_pd(k: __mmask8, idx: __m256i, a: __m256d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutex2var_epi32&expand=4238) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2d or vpermt2d pub unsafe fn _mm512_permutex2var_epi32(a: __m512i, idx: __m512i, b: __m512i) -> __m512i { transmute(vpermi2d(a.as_i32x16(), idx.as_i32x16(), b.as_i32x16())) @@ -20406,6 +21790,7 @@ pub unsafe fn _mm512_permutex2var_epi32(a: __m512i, idx: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutex2var_epi32&expand=4235) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2d))] pub unsafe fn _mm512_mask_permutex2var_epi32( a: __m512i, @@ -20422,6 +21807,7 @@ pub unsafe fn _mm512_mask_permutex2var_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutex2var_epi32&expand=4237) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2d or vpermt2d pub unsafe fn _mm512_maskz_permutex2var_epi32( k: __mmask16, @@ -20439,6 +21825,7 @@ pub unsafe fn _mm512_maskz_permutex2var_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask2_permutex2var_epi32&expand=4236) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpermi2d))] pub unsafe fn _mm512_mask2_permutex2var_epi32( a: __m512i, @@ -20455,6 +21842,7 @@ pub unsafe fn _mm512_mask2_permutex2var_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutex2var_epi32&expand=4234) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2d or vpermt2d pub unsafe fn _mm256_permutex2var_epi32(a: __m256i, idx: __m256i, b: __m256i) -> __m256i { transmute(vpermi2d256(a.as_i32x8(), idx.as_i32x8(), b.as_i32x8())) @@ -20465,6 +21853,7 @@ pub unsafe fn _mm256_permutex2var_epi32(a: __m256i, idx: __m256i, b: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutex2var_epi32&expand=4231) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2d))] pub unsafe fn _mm256_mask_permutex2var_epi32( a: __m256i, @@ -20481,6 +21870,7 @@ pub unsafe fn _mm256_mask_permutex2var_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutex2var_epi32&expand=4233) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2d or vpermt2d pub unsafe fn _mm256_maskz_permutex2var_epi32( k: __mmask8, @@ -20498,6 +21888,7 @@ pub unsafe fn _mm256_maskz_permutex2var_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask2_permutex2var_epi32&expand=4232) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermi2d))] pub unsafe fn 
_mm256_mask2_permutex2var_epi32( a: __m256i, @@ -20514,6 +21905,7 @@ pub unsafe fn _mm256_mask2_permutex2var_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutex2var_epi32&expand=4230) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2d or vpermt2d pub unsafe fn _mm_permutex2var_epi32(a: __m128i, idx: __m128i, b: __m128i) -> __m128i { transmute(vpermi2d128(a.as_i32x4(), idx.as_i32x4(), b.as_i32x4())) @@ -20524,6 +21916,7 @@ pub unsafe fn _mm_permutex2var_epi32(a: __m128i, idx: __m128i, b: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutex2var_epi32&expand=4227) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2d))] pub unsafe fn _mm_mask_permutex2var_epi32( a: __m128i, @@ -20540,6 +21933,7 @@ pub unsafe fn _mm_mask_permutex2var_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutex2var_epi32&expand=4229) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2d or vpermt2d pub unsafe fn _mm_maskz_permutex2var_epi32( k: __mmask8, @@ -20557,6 +21951,7 @@ pub unsafe fn _mm_maskz_permutex2var_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask2_permutex2var_epi32&expand=4228) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermi2d))] pub unsafe fn _mm_mask2_permutex2var_epi32( a: __m128i, @@ -20573,6 +21968,7 @@ pub unsafe fn 
_mm_mask2_permutex2var_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutex2var_epi64&expand=4250) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2q or vpermt2q pub unsafe fn _mm512_permutex2var_epi64(a: __m512i, idx: __m512i, b: __m512i) -> __m512i { transmute(vpermi2q(a.as_i64x8(), idx.as_i64x8(), b.as_i64x8())) @@ -20583,6 +21979,7 @@ pub unsafe fn _mm512_permutex2var_epi64(a: __m512i, idx: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutex2var_epi64&expand=4247) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2q))] pub unsafe fn _mm512_mask_permutex2var_epi64( a: __m512i, @@ -20599,6 +21996,7 @@ pub unsafe fn _mm512_mask_permutex2var_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutex2var_epi64&expand=4249) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2q or vpermt2q pub unsafe fn _mm512_maskz_permutex2var_epi64( k: __mmask8, @@ -20616,6 +22014,7 @@ pub unsafe fn _mm512_maskz_permutex2var_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask2_permutex2var_epi64&expand=4248) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermi2q))] pub unsafe fn _mm512_mask2_permutex2var_epi64( a: __m512i, @@ -20632,6 +22031,7 @@ pub unsafe fn _mm512_mask2_permutex2var_epi64( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutex2var_epi64&expand=4246) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2q or vpermt2q pub unsafe fn _mm256_permutex2var_epi64(a: __m256i, idx: __m256i, b: __m256i) -> __m256i { transmute(vpermi2q256(a.as_i64x4(), idx.as_i64x4(), b.as_i64x4())) @@ -20642,6 +22042,7 @@ pub unsafe fn _mm256_permutex2var_epi64(a: __m256i, idx: __m256i, b: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutex2var_epi64&expand=4243) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2q))] pub unsafe fn _mm256_mask_permutex2var_epi64( a: __m256i, @@ -20658,6 +22059,7 @@ pub unsafe fn _mm256_mask_permutex2var_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutex2var_epi64&expand=4245) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2q or vpermt2q pub unsafe fn _mm256_maskz_permutex2var_epi64( k: __mmask8, @@ -20675,6 +22077,7 @@ pub unsafe fn _mm256_maskz_permutex2var_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask2_permutex2var_epi64&expand=4244) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermi2q))] pub unsafe fn _mm256_mask2_permutex2var_epi64( a: __m256i, @@ -20691,6 +22094,7 @@ pub unsafe fn _mm256_mask2_permutex2var_epi64( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutex2var_epi64&expand=4242) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2q or vpermt2q pub unsafe fn _mm_permutex2var_epi64(a: __m128i, idx: __m128i, b: __m128i) -> __m128i { transmute(vpermi2q128(a.as_i64x2(), idx.as_i64x2(), b.as_i64x2())) @@ -20701,6 +22105,7 @@ pub unsafe fn _mm_permutex2var_epi64(a: __m128i, idx: __m128i, b: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutex2var_epi64&expand=4239) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2q))] pub unsafe fn _mm_mask_permutex2var_epi64( a: __m128i, @@ -20717,6 +22122,7 @@ pub unsafe fn _mm_mask_permutex2var_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutex2var_epi64&expand=4241) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2q or vpermt2q pub unsafe fn _mm_maskz_permutex2var_epi64( k: __mmask8, @@ -20734,6 +22140,7 @@ pub unsafe fn _mm_maskz_permutex2var_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask2_permutex2var_epi64&expand=4240) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermi2q))] pub unsafe fn _mm_mask2_permutex2var_epi64( a: __m128i, @@ -20750,6 +22157,7 @@ pub unsafe fn _mm_mask2_permutex2var_epi64( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutex2var_ps&expand=4286) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2ps or vpermt2ps pub unsafe fn _mm512_permutex2var_ps(a: __m512, idx: __m512i, b: __m512) -> __m512 { transmute(vpermi2ps(a.as_f32x16(), idx.as_i32x16(), b.as_f32x16())) @@ -20760,6 +22168,7 @@ pub unsafe fn _mm512_permutex2var_ps(a: __m512, idx: __m512i, b: __m512) -> __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutex2var_ps&expand=4283) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2ps))] pub unsafe fn _mm512_mask_permutex2var_ps( a: __m512, @@ -20776,6 +22185,7 @@ pub unsafe fn _mm512_mask_permutex2var_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutex2var_ps&expand=4285) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2ps or vpermt2ps pub unsafe fn _mm512_maskz_permutex2var_ps( k: __mmask16, @@ -20793,6 +22203,7 @@ pub unsafe fn _mm512_maskz_permutex2var_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask2_permutex2var_ps&expand=4284) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //should be vpermi2ps, but it shows vpermt2ps pub unsafe fn _mm512_mask2_permutex2var_ps( a: __m512, @@ -20810,6 +22221,7 @@ pub unsafe fn _mm512_mask2_permutex2var_ps( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutex2var_ps&expand=4282) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2ps or vpermt2ps pub unsafe fn _mm256_permutex2var_ps(a: __m256, idx: __m256i, b: __m256) -> __m256 { transmute(vpermi2ps256(a.as_f32x8(), idx.as_i32x8(), b.as_f32x8())) @@ -20820,6 +22232,7 @@ pub unsafe fn _mm256_permutex2var_ps(a: __m256, idx: __m256i, b: __m256) -> __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutex2var_ps&expand=4279) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2ps))] pub unsafe fn _mm256_mask_permutex2var_ps( a: __m256, @@ -20836,6 +22249,7 @@ pub unsafe fn _mm256_mask_permutex2var_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutex2var_ps&expand=4281) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2ps or vpermt2ps pub unsafe fn _mm256_maskz_permutex2var_ps( k: __mmask8, @@ -20853,6 +22267,7 @@ pub unsafe fn _mm256_maskz_permutex2var_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask2_permutex2var_ps&expand=4280) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //should be vpermi2ps, but it shows vpermt2ps pub unsafe fn _mm256_mask2_permutex2var_ps( a: __m256, @@ -20870,6 +22285,7 @@ pub unsafe fn _mm256_mask2_permutex2var_ps( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutex2var_ps&expand=4278) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2ps or vpermt2ps pub unsafe fn _mm_permutex2var_ps(a: __m128, idx: __m128i, b: __m128) -> __m128 { transmute(vpermi2ps128(a.as_f32x4(), idx.as_i32x4(), b.as_f32x4())) @@ -20880,6 +22296,7 @@ pub unsafe fn _mm_permutex2var_ps(a: __m128, idx: __m128i, b: __m128) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutex2var_ps&expand=4275) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2ps))] pub unsafe fn _mm_mask_permutex2var_ps(a: __m128, k: __mmask8, idx: __m128i, b: __m128) -> __m128 { let permute = _mm_permutex2var_ps(a, idx, b).as_f32x4(); @@ -20891,6 +22308,7 @@ pub unsafe fn _mm_mask_permutex2var_ps(a: __m128, k: __mmask8, idx: __m128i, b: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutex2var_ps&expand=4277) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2ps or vpermt2ps pub unsafe fn _mm_maskz_permutex2var_ps(k: __mmask8, a: __m128, idx: __m128i, b: __m128) -> __m128 { let permute = _mm_permutex2var_ps(a, idx, b).as_f32x4(); @@ -20903,6 +22321,7 @@ pub unsafe fn _mm_maskz_permutex2var_ps(k: __mmask8, a: __m128, idx: __m128i, b: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask2_permutex2var_ps&expand=4276) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vperm))] //should be vpermi2ps, but it shows vpermt2ps pub unsafe fn _mm_mask2_permutex2var_ps(a: __m128, idx: __m128i, k: __mmask8, b: __m128) -> __m128 { let permute = _mm_permutex2var_ps(a, idx, b).as_f32x4(); @@ -20915,6 +22334,7 @@ pub unsafe fn _mm_mask2_permutex2var_ps(a: __m128, idx: __m128i, k: __mmask8, b: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutex2var_pd&expand=4274) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2pd or vpermt2pd pub unsafe fn _mm512_permutex2var_pd(a: __m512d, idx: __m512i, b: __m512d) -> __m512d { transmute(vpermi2pd(a.as_f64x8(), idx.as_i64x8(), b.as_f64x8())) @@ -20925,6 +22345,7 @@ pub unsafe fn _mm512_permutex2var_pd(a: __m512d, idx: __m512i, b: __m512d) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutex2var_pd&expand=4271) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2pd))] pub unsafe fn _mm512_mask_permutex2var_pd( a: __m512d, @@ -20941,6 +22362,7 @@ pub unsafe fn _mm512_mask_permutex2var_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutex2var_pd&expand=4273) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2pd or vpermt2pd pub unsafe fn _mm512_maskz_permutex2var_pd( k: __mmask8, @@ -20958,6 +22380,7 @@ pub unsafe fn _mm512_maskz_permutex2var_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask2_permutex2var_pd&expand=4272) #[inline] #[target_feature(enable = "avx512f")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //should be vpermi2pd, but it shows vpermt2pd pub unsafe fn _mm512_mask2_permutex2var_pd( a: __m512d, @@ -20975,6 +22398,7 @@ pub unsafe fn _mm512_mask2_permutex2var_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutex2var_pd&expand=4270) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2pd or vpermt2pd pub unsafe fn _mm256_permutex2var_pd(a: __m256d, idx: __m256i, b: __m256d) -> __m256d { transmute(vpermi2pd256(a.as_f64x4(), idx.as_i64x4(), b.as_f64x4())) @@ -20985,6 +22409,7 @@ pub unsafe fn _mm256_permutex2var_pd(a: __m256d, idx: __m256i, b: __m256d) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutex2var_pd&expand=4267) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2pd))] pub unsafe fn _mm256_mask_permutex2var_pd( a: __m256d, @@ -21001,6 +22426,7 @@ pub unsafe fn _mm256_mask_permutex2var_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutex2var_pd&expand=4269) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2pd or vpermt2pd pub unsafe fn _mm256_maskz_permutex2var_pd( k: __mmask8, @@ -21018,6 +22444,7 @@ pub unsafe fn _mm256_maskz_permutex2var_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask2_permutex2var_pd&expand=4268) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //should be vpermi2pd, but it shows vpermt2pd pub unsafe fn _mm256_mask2_permutex2var_pd( a: __m256d, @@ -21035,6 +22462,7 @@ pub unsafe fn _mm256_mask2_permutex2var_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutex2var_pd&expand=4266) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2pd or vpermt2pd pub unsafe fn _mm_permutex2var_pd(a: __m128d, idx: __m128i, b: __m128d) -> __m128d { transmute(vpermi2pd128(a.as_f64x2(), idx.as_i64x2(), b.as_f64x2())) @@ -21045,6 +22473,7 @@ pub unsafe fn _mm_permutex2var_pd(a: __m128d, idx: __m128i, b: __m128d) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutex2var_pd&expand=4263) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2pd))] pub unsafe fn _mm_mask_permutex2var_pd( a: __m128d, @@ -21061,6 +22490,7 @@ pub unsafe fn _mm_mask_permutex2var_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutex2var_pd&expand=4265) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //vpermi2pd or vpermt2pd pub unsafe fn _mm_maskz_permutex2var_pd( k: __mmask8, @@ -21078,6 +22508,7 @@ pub unsafe fn _mm_maskz_permutex2var_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask2_permutex2var_pd&expand=4264) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //should be 
vpermi2pd, but it shows vpermt2pd pub unsafe fn _mm_mask2_permutex2var_pd( a: __m128d, @@ -21095,6 +22526,7 @@ pub unsafe fn _mm_mask2_permutex2var_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_shuffle_epi32&expand=5150) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufps, MASK = 9))] //should be vpshufd #[rustc_legacy_const_generics(1)] pub unsafe fn _mm512_shuffle_epi32(a: __m512i) -> __m512i { @@ -21129,6 +22561,7 @@ pub unsafe fn _mm512_shuffle_epi32(a: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shuffle_epi32&expand=5148) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_mask_shuffle_epi32( @@ -21146,6 +22579,7 @@ pub unsafe fn _mm512_mask_shuffle_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shuffle_epi32&expand=5149) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_maskz_shuffle_epi32( @@ -21163,6 +22597,7 @@ pub unsafe fn _mm512_maskz_shuffle_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shuffle_epi32&expand=5145) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_mask_shuffle_epi32( @@ -21180,6 +22615,7 @@ pub unsafe fn _mm256_mask_shuffle_epi32( /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shuffle_epi32&expand=5146) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_maskz_shuffle_epi32( @@ -21197,6 +22633,7 @@ pub unsafe fn _mm256_maskz_shuffle_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shuffle_epi32&expand=5142) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_mask_shuffle_epi32( @@ -21214,6 +22651,7 @@ pub unsafe fn _mm_mask_shuffle_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shuffle_epi32&expand=5143) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshufd, MASK = 9))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_maskz_shuffle_epi32( @@ -21231,6 +22669,7 @@ pub unsafe fn _mm_maskz_shuffle_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shuffle_ps&expand=5203) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shuffle_ps(a: __m512, b: __m512) -> __m512 { @@ -21264,6 +22703,7 @@ pub unsafe fn _mm512_shuffle_ps(a: __m512, b: __m512) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shuffle_ps&expand=5201) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_shuffle_ps( @@ -21282,6 +22722,7 @@ pub unsafe fn _mm512_mask_shuffle_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shuffle_ps&expand=5202) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_shuffle_ps( @@ -21300,6 +22741,7 @@ pub unsafe fn _mm512_maskz_shuffle_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shuffle_ps&expand=5198) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_shuffle_ps( @@ -21318,6 +22760,7 @@ pub unsafe fn _mm256_mask_shuffle_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shuffle_ps&expand=5199) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_shuffle_ps( @@ -21336,6 +22779,7 @@ pub unsafe fn _mm256_maskz_shuffle_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shuffle_ps&expand=5195) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(4)] pub unsafe fn 
_mm_mask_shuffle_ps( @@ -21354,6 +22798,7 @@ pub unsafe fn _mm_mask_shuffle_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shuffle_ps&expand=5196) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_shuffle_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { @@ -21368,6 +22813,7 @@ pub unsafe fn _mm_maskz_shuffle_ps(k: __mmask8, a: __m128, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shuffle_pd&expand=5192) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shuffle_pd(a: __m512d, b: __m512d) -> __m512d { @@ -21393,6 +22839,7 @@ pub unsafe fn _mm512_shuffle_pd(a: __m512d, b: __m512d) -> __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shuffle_pd&expand=5190) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_shuffle_pd( @@ -21411,6 +22858,7 @@ pub unsafe fn _mm512_mask_shuffle_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shuffle_pd&expand=5191) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_shuffle_pd( @@ -21429,6 +22877,7 @@ pub unsafe fn _mm512_maskz_shuffle_pd( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shuffle_pd&expand=5187) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_shuffle_pd( @@ -21447,6 +22896,7 @@ pub unsafe fn _mm256_mask_shuffle_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shuffle_pd&expand=5188) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_shuffle_pd( @@ -21465,6 +22915,7 @@ pub unsafe fn _mm256_maskz_shuffle_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shuffle_pd&expand=5184) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_shuffle_pd( @@ -21483,6 +22934,7 @@ pub unsafe fn _mm_mask_shuffle_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shuffle_pd&expand=5185) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_shuffle_pd( @@ -21501,6 +22953,7 @@ pub unsafe fn _mm_maskz_shuffle_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shuffle_i32&expand=5177) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue 
= "111137")] #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_01_01_01))] //should be vshufi32x4 #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shuffle_i32x4(a: __m512i, b: __m512i) -> __m512i { @@ -21537,6 +22990,7 @@ pub unsafe fn _mm512_shuffle_i32x4(a: __m512i, b: __m512i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shuffle_i32x&expand=5175) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufi32x4, MASK = 0b10_11_01_01))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_shuffle_i32x4( @@ -21555,6 +23009,7 @@ pub unsafe fn _mm512_mask_shuffle_i32x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shuffle_i32&expand=5176) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufi32x4, MASK = 0b10_11_01_01))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_shuffle_i32x4( @@ -21573,6 +23028,7 @@ pub unsafe fn _mm512_maskz_shuffle_i32x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shuffle_i32x4&expand=5174) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b11))] //should be vshufi32x4 #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shuffle_i32x4(a: __m256i, b: __m256i) -> __m256i { @@ -21601,6 +23057,7 @@ pub unsafe fn _mm256_shuffle_i32x4(a: __m256i, b: __m256i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shuffle_i32x4&expand=5172) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vshufi32x4, MASK = 0b11))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_shuffle_i32x4( @@ -21619,6 +23076,7 @@ pub unsafe fn _mm256_mask_shuffle_i32x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shuffle_i32x4&expand=5173) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufi32x4, MASK = 0b11))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_shuffle_i32x4( @@ -21637,6 +23095,7 @@ pub unsafe fn _mm256_maskz_shuffle_i32x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shuffle_i64x2&expand=5183) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_11_11_11))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shuffle_i64x2(a: __m512i, b: __m512i) -> __m512i { @@ -21665,6 +23124,7 @@ pub unsafe fn _mm512_shuffle_i64x2(a: __m512i, b: __m512i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shuffle_i64x&expand=5181) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_11_11_11))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_shuffle_i64x2( @@ -21683,6 +23143,7 @@ pub unsafe fn _mm512_mask_shuffle_i64x2( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shuffle_i64&expand=5182) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_11_11_11))] 
#[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_shuffle_i64x2( @@ -21701,6 +23162,7 @@ pub unsafe fn _mm512_maskz_shuffle_i64x2( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shuffle_i64x2&expand=5180) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b01))] //should be vshufi64x2 #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shuffle_i64x2(a: __m256i, b: __m256i) -> __m256i { @@ -21725,6 +23187,7 @@ pub unsafe fn _mm256_shuffle_i64x2(a: __m256i, b: __m256i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shuffle_i64x2&expand=5178) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b11))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_shuffle_i64x2( @@ -21743,6 +23206,7 @@ pub unsafe fn _mm256_mask_shuffle_i64x2( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shuffle_i64x2&expand=5179) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b11))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_shuffle_i64x2( @@ -21761,6 +23225,7 @@ pub unsafe fn _mm256_maskz_shuffle_i64x2( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shuffle_f32x4&expand=5165) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b1011))] //should be vshuff32x4, but generate vshuff64x2 #[rustc_legacy_const_generics(2)] pub 
unsafe fn _mm512_shuffle_f32x4(a: __m512, b: __m512) -> __m512 { @@ -21797,6 +23262,7 @@ pub unsafe fn _mm512_shuffle_f32x4(a: __m512, b: __m512) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shuffle_f32&expand=5163) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshuff32x4, MASK = 0b1011))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_shuffle_f32x4( @@ -21815,6 +23281,7 @@ pub unsafe fn _mm512_mask_shuffle_f32x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shuffle_f32&expand=5164) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshuff32x4, MASK = 0b1011))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_shuffle_f32x4( @@ -21833,6 +23300,7 @@ pub unsafe fn _mm512_maskz_shuffle_f32x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shuffle_f32x4&expand=5162) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b01))] //should be vshuff32x4 #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shuffle_f32x4(a: __m256, b: __m256) -> __m256 { @@ -21861,6 +23329,7 @@ pub unsafe fn _mm256_shuffle_f32x4(a: __m256, b: __m256) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shuffle_f32x4&expand=5160) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshuff32x4, MASK = 0b11))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_shuffle_f32x4( @@ -21879,6 
+23348,7 @@ pub unsafe fn _mm256_mask_shuffle_f32x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shuffle_f32x4&expand=5161) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshuff32x4, MASK = 0b11))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_shuffle_f32x4( @@ -21897,6 +23367,7 @@ pub unsafe fn _mm256_maskz_shuffle_f32x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shuffle_f64x2&expand=5171) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b10_11_11_11))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shuffle_f64x2(a: __m512d, b: __m512d) -> __m512d { @@ -21925,6 +23396,7 @@ pub unsafe fn _mm512_shuffle_f64x2(a: __m512d, b: __m512d) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shuffle_f64x2&expand=5169) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b10_11_11_11))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_shuffle_f64x2( @@ -21943,6 +23415,7 @@ pub unsafe fn _mm512_mask_shuffle_f64x2( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shuffle_f64x2&expand=5170) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b10_11_11_11))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_shuffle_f64x2( @@ -21961,6 +23434,7 @@ pub unsafe fn _mm512_maskz_shuffle_f64x2( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shuffle_f64x2&expand=5168) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm, MASK = 0b01))] //should be vshuff64x2 #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shuffle_f64x2(a: __m256d, b: __m256d) -> __m256d { @@ -21985,6 +23459,7 @@ pub unsafe fn _mm256_shuffle_f64x2(a: __m256d, b: __m256d) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shuffle_f64x2&expand=5166) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b11))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_shuffle_f64x2( @@ -22003,6 +23478,7 @@ pub unsafe fn _mm256_mask_shuffle_f64x2( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shuffle_f64x2&expand=5167) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b11))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_shuffle_f64x2( @@ -22021,6 +23497,7 @@ pub unsafe fn _mm256_maskz_shuffle_f64x2( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_extractf32x4_ps&expand=2442) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextractf32x4, IMM8 = 3) @@ -22041,6 +23518,7 @@ pub unsafe fn _mm512_extractf32x4_ps(a: __m512) -> __m128 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_extractf32x4_ps&expand=2443) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextractf32x4, IMM8 = 3) @@ -22061,6 +23539,7 @@ pub unsafe fn _mm512_mask_extractf32x4_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_extractf32x4_ps&expand=2444) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextractf32x4, IMM8 = 3) @@ -22078,6 +23557,7 @@ pub unsafe fn _mm512_maskz_extractf32x4_ps(k: __mmask8, a: __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_extractf32x4_ps&expand=2439) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextract, IMM8 = 1) //should be vextractf32x4 @@ -22096,6 +23576,7 @@ pub unsafe fn _mm256_extractf32x4_ps(a: __m256) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_extractf32x4_ps&expand=2440) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextractf32x4, IMM8 = 1) @@ -22116,6 +23597,7 @@ pub unsafe fn _mm256_mask_extractf32x4_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_extractf32x4_ps&expand=2441) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( 
all(test, not(target_os = "windows")), assert_instr(vextractf32x4, IMM8 = 1) @@ -22133,6 +23615,7 @@ pub unsafe fn _mm256_maskz_extractf32x4_ps(k: __mmask8, a: __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_extracti64x4_epi64&expand=2473) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextractf64x4, IMM1 = 1) //should be vextracti64x4 @@ -22151,6 +23634,7 @@ pub unsafe fn _mm512_extracti64x4_epi64(a: __m512i) -> __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_extracti64x4_epi64&expand=2474) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextracti64x4, IMM1 = 1) @@ -22171,6 +23655,7 @@ pub unsafe fn _mm512_mask_extracti64x4_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_extracti64x4_epi64&expand=2475) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextracti64x4, IMM1 = 1) @@ -22188,6 +23673,7 @@ pub unsafe fn _mm512_maskz_extracti64x4_epi64(k: __mmask8, a: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_extractf64x4_pd&expand=2454) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextractf64x4, IMM8 = 1) @@ -22206,6 +23692,7 @@ pub unsafe fn _mm512_extractf64x4_pd(a: __m512d) -> __m256d { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_extractf64x4_pd&expand=2455) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextractf64x4, IMM8 = 1) @@ -22226,6 +23713,7 @@ pub unsafe fn _mm512_mask_extractf64x4_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_extractf64x4_pd&expand=2456) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextractf64x4, IMM8 = 1) @@ -22243,6 +23731,7 @@ pub unsafe fn _mm512_maskz_extractf64x4_pd(k: __mmask8, a: __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_extracti32x4_epi32&expand=2461) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextractf32x4, IMM2 = 3) //should be vextracti32x4 @@ -22266,6 +23755,7 @@ pub unsafe fn _mm512_extracti32x4_epi32(a: __m512i) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_extracti32x4_epi32&expand=2462) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextracti32x4, IMM2 = 3) @@ -22286,6 +23776,7 @@ pub unsafe fn _mm512_mask_extracti32x4_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_extracti32x4_epi32&expand=2463) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, 
not(target_os = "windows")), assert_instr(vextracti32x4, IMM2 = 3) @@ -22303,6 +23794,7 @@ pub unsafe fn _mm512_maskz_extracti32x4_epi32(k: __mmask8, a: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_extracti32x4_epi32&expand=2458) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextract, IMM1 = 1) //should be vextracti32x4 @@ -22324,6 +23816,7 @@ pub unsafe fn _mm256_extracti32x4_epi32(a: __m256i) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_extracti32x4_epi32&expand=2459) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextracti32x4, IMM1 = 1) @@ -22344,6 +23837,7 @@ pub unsafe fn _mm256_mask_extracti32x4_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_extracti32x4_epi32&expand=2460) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextracti32x4, IMM1 = 1) @@ -22361,6 +23855,7 @@ pub unsafe fn _mm256_maskz_extracti32x4_epi32(k: __mmask8, a: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_moveldup_ps&expand=3862) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovsldup))] pub unsafe fn _mm512_moveldup_ps(a: __m512) -> __m512 { let r: f32x16 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14]); @@ -22372,6 +23867,7 @@ pub unsafe fn _mm512_moveldup_ps(a: 
__m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_moveldup_ps&expand=3860) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovsldup))] pub unsafe fn _mm512_mask_moveldup_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { let mov: f32x16 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14]); @@ -22383,6 +23879,7 @@ pub unsafe fn _mm512_mask_moveldup_ps(src: __m512, k: __mmask16, a: __m512) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_moveldup_ps&expand=3861) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovsldup))] pub unsafe fn _mm512_maskz_moveldup_ps(k: __mmask16, a: __m512) -> __m512 { let mov: f32x16 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14]); @@ -22395,6 +23892,7 @@ pub unsafe fn _mm512_maskz_moveldup_ps(k: __mmask16, a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_moveldup_ps&expand=3857) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovsldup))] pub unsafe fn _mm256_mask_moveldup_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { let mov = _mm256_moveldup_ps(a); @@ -22406,6 +23904,7 @@ pub unsafe fn _mm256_mask_moveldup_ps(src: __m256, k: __mmask8, a: __m256) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_moveldup_ps&expand=3858) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vmovsldup))] pub unsafe fn _mm256_maskz_moveldup_ps(k: __mmask8, a: __m256) -> __m256 { let mov = _mm256_moveldup_ps(a); @@ -22418,6 +23917,7 @@ pub unsafe fn _mm256_maskz_moveldup_ps(k: __mmask8, a: __m256) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_moveldup_ps&expand=3854) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovsldup))] pub unsafe fn _mm_mask_moveldup_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { let mov = _mm_moveldup_ps(a); @@ -22429,6 +23929,7 @@ pub unsafe fn _mm_mask_moveldup_ps(src: __m128, k: __mmask8, a: __m128) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_moveldup_ps&expand=3855) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovsldup))] pub unsafe fn _mm_maskz_moveldup_ps(k: __mmask8, a: __m128) -> __m128 { let mov = _mm_moveldup_ps(a); @@ -22441,6 +23942,7 @@ pub unsafe fn _mm_maskz_moveldup_ps(k: __mmask8, a: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_movehdup_ps&expand=3852) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovshdup))] pub unsafe fn _mm512_movehdup_ps(a: __m512) -> __m512 { let r: f32x16 = simd_shuffle!(a, a, [1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15]); @@ -22452,6 +23954,7 @@ pub unsafe fn _mm512_movehdup_ps(a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_movehdup&expand=3850) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovshdup))] pub unsafe fn _mm512_mask_movehdup_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 { let mov: f32x16 = simd_shuffle!(a, a, [1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15]); @@ -22463,6 +23966,7 @@ pub unsafe fn _mm512_mask_movehdup_ps(src: __m512, k: __mmask16, a: __m512) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_moveh&expand=3851) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovshdup))] pub unsafe fn _mm512_maskz_movehdup_ps(k: __mmask16, a: __m512) -> __m512 { let mov: f32x16 = simd_shuffle!(a, a, [1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15]); @@ -22475,6 +23979,7 @@ pub unsafe fn _mm512_maskz_movehdup_ps(k: __mmask16, a: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_movehdup_ps&expand=3847) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovshdup))] pub unsafe fn _mm256_mask_movehdup_ps(src: __m256, k: __mmask8, a: __m256) -> __m256 { let mov = _mm256_movehdup_ps(a); @@ -22486,6 +23991,7 @@ pub unsafe fn _mm256_mask_movehdup_ps(src: __m256, k: __mmask8, a: __m256) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_movehdup_ps&expand=3848) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovshdup))] pub unsafe fn _mm256_maskz_movehdup_ps(k: __mmask8, a: __m256) -> __m256 { let mov = _mm256_movehdup_ps(a); @@ -22498,6 +24004,7 @@ pub unsafe fn _mm256_maskz_movehdup_ps(k: __mmask8, a: __m256) -> __m256 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_movehdup_ps&expand=3844) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovshdup))] pub unsafe fn _mm_mask_movehdup_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 { let mov = _mm_movehdup_ps(a); @@ -22509,6 +24016,7 @@ pub unsafe fn _mm_mask_movehdup_ps(src: __m128, k: __mmask8, a: __m128) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_movehdup_ps&expand=3845) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovshdup))] pub unsafe fn _mm_maskz_movehdup_ps(k: __mmask8, a: __m128) -> __m128 { let mov = _mm_movehdup_ps(a); @@ -22521,6 +24029,7 @@ pub unsafe fn _mm_maskz_movehdup_ps(k: __mmask8, a: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_movedup_pd&expand=3843) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovddup))] pub unsafe fn _mm512_movedup_pd(a: __m512d) -> __m512d { let r: f64x8 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6]); @@ -22532,6 +24041,7 @@ pub unsafe fn _mm512_movedup_pd(a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_movedup_pd&expand=3841) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovddup))] pub unsafe fn _mm512_mask_movedup_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d { let mov: f64x8 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6]); @@ -22543,6 +24053,7 @@ pub unsafe fn 
_mm512_mask_movedup_pd(src: __m512d, k: __mmask8, a: __m512d) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_movedup_pd&expand=3842) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovddup))] pub unsafe fn _mm512_maskz_movedup_pd(k: __mmask8, a: __m512d) -> __m512d { let mov: f64x8 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6]); @@ -22555,6 +24066,7 @@ pub unsafe fn _mm512_maskz_movedup_pd(k: __mmask8, a: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_movedup_pd&expand=3838) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovddup))] pub unsafe fn _mm256_mask_movedup_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m256d { let mov = _mm256_movedup_pd(a); @@ -22566,6 +24078,7 @@ pub unsafe fn _mm256_mask_movedup_pd(src: __m256d, k: __mmask8, a: __m256d) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_movedup_pd&expand=3839) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovddup))] pub unsafe fn _mm256_maskz_movedup_pd(k: __mmask8, a: __m256d) -> __m256d { let mov = _mm256_movedup_pd(a); @@ -22578,6 +24091,7 @@ pub unsafe fn _mm256_maskz_movedup_pd(k: __mmask8, a: __m256d) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_movedup_pd&expand=3835) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovddup))] pub unsafe fn _mm_mask_movedup_pd(src: __m128d, k: 
__mmask8, a: __m128d) -> __m128d { let mov = _mm_movedup_pd(a); @@ -22589,6 +24103,7 @@ pub unsafe fn _mm_mask_movedup_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_movedup_pd&expand=3836) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovddup))] pub unsafe fn _mm_maskz_movedup_pd(k: __mmask8, a: __m128d) -> __m128d { let mov = _mm_movedup_pd(a); @@ -22601,6 +24116,7 @@ pub unsafe fn _mm_maskz_movedup_pd(k: __mmask8, a: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_inserti32x4&expand=3174) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))] //should be vinserti32x4 #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_inserti32x4(a: __m512i, b: __m128i) -> __m512i { @@ -22633,6 +24149,7 @@ pub unsafe fn _mm512_inserti32x4(a: __m512i, b: __m128i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_inserti32x4&expand=3175) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vinserti32x4, IMM8 = 2))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_inserti32x4( @@ -22651,6 +24168,7 @@ pub unsafe fn _mm512_mask_inserti32x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_inserti32x4&expand=3176) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vinserti32x4, IMM8 = 2))] #[rustc_legacy_const_generics(3)] pub unsafe fn 
_mm512_maskz_inserti32x4( @@ -22669,6 +24187,7 @@ pub unsafe fn _mm512_maskz_inserti32x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_inserti32x4&expand=3171) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vinsert, IMM8 = 1) //should be vinserti32x4 @@ -22690,6 +24209,7 @@ pub unsafe fn _mm256_inserti32x4(a: __m256i, b: __m128i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_inserti32x4&expand=3172) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vinserti32x4, IMM8 = 1) @@ -22711,6 +24231,7 @@ pub unsafe fn _mm256_mask_inserti32x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_inserti32x4&expand=3173) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vinserti32x4, IMM8 = 1) @@ -22732,6 +24253,7 @@ pub unsafe fn _mm256_maskz_inserti32x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_inserti64x4&expand=3186) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))] //should be vinserti64x4 #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_inserti64x4(a: __m512i, b: __m256i) -> __m512i { @@ -22748,6 +24270,7 @@ pub unsafe fn _mm512_inserti64x4(a: __m512i, b: __m256i) -> __m /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_inserti64x4&expand=3187) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vinserti64x4, IMM8 = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_inserti64x4( @@ -22766,6 +24289,7 @@ pub unsafe fn _mm512_mask_inserti64x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_inserti64x4&expand=3188) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vinserti64x4, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_inserti64x4( @@ -22784,6 +24308,7 @@ pub unsafe fn _mm512_maskz_inserti64x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_insertf32x4&expand=3155) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_insertf32x4(a: __m512, b: __m128) -> __m512 { @@ -22814,6 +24339,7 @@ pub unsafe fn _mm512_insertf32x4(a: __m512, b: __m128) -> __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_insertf32x4&expand=3156) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_insertf32x4( @@ -22832,6 +24358,7 @@ pub unsafe fn _mm512_mask_insertf32x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_insertf32x4&expand=3157) #[inline] #[target_feature(enable 
= "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_insertf32x4( @@ -22850,6 +24377,7 @@ pub unsafe fn _mm512_maskz_insertf32x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_insertf32x4&expand=3152) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vinsert, IMM8 = 1) //should be vinsertf32x4 @@ -22869,6 +24397,7 @@ pub unsafe fn _mm256_insertf32x4(a: __m256, b: __m128) -> __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_insertf32x4&expand=3153) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vinsertf32x4, IMM8 = 1) @@ -22890,6 +24419,7 @@ pub unsafe fn _mm256_mask_insertf32x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_insertf32x4&expand=3154) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vinsertf32x4, IMM8 = 1) @@ -22911,6 +24441,7 @@ pub unsafe fn _mm256_maskz_insertf32x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_insertf64x4&expand=3167) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_insertf64x4(a: __m512d, b: __m256d) -> __m512d { @@ -22927,6 +24458,7 @@ pub unsafe 
fn _mm512_insertf64x4(a: __m512d, b: __m256d) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_insertf64x4&expand=3168) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_insertf64x4( @@ -22945,6 +24477,7 @@ pub unsafe fn _mm512_mask_insertf64x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_insertf64x4&expand=3169) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_insertf64x4( @@ -22963,6 +24496,7 @@ pub unsafe fn _mm512_maskz_insertf64x4( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpackhi_epi32&expand=6021) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpckhps))] //should be vpunpckhdq pub unsafe fn _mm512_unpackhi_epi32(a: __m512i, b: __m512i) -> __m512i { let a = a.as_i32x16(); @@ -22983,6 +24517,7 @@ pub unsafe fn _mm512_unpackhi_epi32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpackhi_epi32&expand=6019) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhdq))] pub unsafe fn _mm512_mask_unpackhi_epi32( src: __m512i, @@ -22999,6 +24534,7 @@ pub unsafe fn _mm512_mask_unpackhi_epi32( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpackhi_epi32&expand=6020) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhdq))] pub unsafe fn _mm512_maskz_unpackhi_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let unpackhi = _mm512_unpackhi_epi32(a, b).as_i32x16(); @@ -23011,6 +24547,7 @@ pub unsafe fn _mm512_maskz_unpackhi_epi32(k: __mmask16, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpackhi_epi32&expand=6016) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhdq))] pub unsafe fn _mm256_mask_unpackhi_epi32( src: __m256i, @@ -23027,6 +24564,7 @@ pub unsafe fn _mm256_mask_unpackhi_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpackhi_epi32&expand=6017) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhdq))] pub unsafe fn _mm256_maskz_unpackhi_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let unpackhi = _mm256_unpackhi_epi32(a, b).as_i32x8(); @@ -23039,6 +24577,7 @@ pub unsafe fn _mm256_maskz_unpackhi_epi32(k: __mmask8, a: __m256i, b: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpackhi_epi32&expand=6013) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhdq))] pub unsafe fn _mm_mask_unpackhi_epi32( src: __m128i, @@ -23055,6 +24594,7 @@ pub unsafe fn _mm_mask_unpackhi_epi32( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpackhi_epi32&expand=6014) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhdq))] pub unsafe fn _mm_maskz_unpackhi_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let unpackhi = _mm_unpackhi_epi32(a, b).as_i32x4(); @@ -23067,6 +24607,7 @@ pub unsafe fn _mm_maskz_unpackhi_epi32(k: __mmask8, a: __m128i, b: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpackhi_epi64&expand=6030) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpckhpd))] //should be vpunpckhqdq pub unsafe fn _mm512_unpackhi_epi64(a: __m512i, b: __m512i) -> __m512i { simd_shuffle!(a, b, [1, 9, 1 + 2, 9 + 2, 1 + 4, 9 + 4, 1 + 6, 9 + 6]) @@ -23077,6 +24618,7 @@ pub unsafe fn _mm512_unpackhi_epi64(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpackhi_epi64&expand=6028) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhqdq))] pub unsafe fn _mm512_mask_unpackhi_epi64( src: __m512i, @@ -23093,6 +24635,7 @@ pub unsafe fn _mm512_mask_unpackhi_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpackhi_epi64&expand=6029) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhqdq))] pub unsafe fn _mm512_maskz_unpackhi_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let unpackhi = _mm512_unpackhi_epi64(a, b).as_i64x8(); @@ -23105,6 +24648,7 @@ 
pub unsafe fn _mm512_maskz_unpackhi_epi64(k: __mmask8, a: __m512i, b: __m512i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpackhi_epi64&expand=6025) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhqdq))] pub unsafe fn _mm256_mask_unpackhi_epi64( src: __m256i, @@ -23121,6 +24665,7 @@ pub unsafe fn _mm256_mask_unpackhi_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpackhi_epi64&expand=6026) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhqdq))] pub unsafe fn _mm256_maskz_unpackhi_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let unpackhi = _mm256_unpackhi_epi64(a, b).as_i64x4(); @@ -23133,6 +24678,7 @@ pub unsafe fn _mm256_maskz_unpackhi_epi64(k: __mmask8, a: __m256i, b: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpackhi_epi64&expand=6022) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhqdq))] pub unsafe fn _mm_mask_unpackhi_epi64( src: __m128i, @@ -23149,6 +24695,7 @@ pub unsafe fn _mm_mask_unpackhi_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpackhi_epi64&expand=6023) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckhqdq))] pub unsafe fn _mm_maskz_unpackhi_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let unpackhi = _mm_unpackhi_epi64(a, b).as_i64x2(); @@ -23161,6 +24708,7 @@ pub unsafe fn 
_mm_maskz_unpackhi_epi64(k: __mmask8, a: __m128i, b: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpackhi_ps&expand=6060) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpckhps))] pub unsafe fn _mm512_unpackhi_ps(a: __m512, b: __m512) -> __m512 { #[rustfmt::skip] @@ -23178,6 +24726,7 @@ pub unsafe fn _mm512_unpackhi_ps(a: __m512, b: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpackhi_ps&expand=6058) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpckhps))] pub unsafe fn _mm512_mask_unpackhi_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { let unpackhi = _mm512_unpackhi_ps(a, b).as_f32x16(); @@ -23189,6 +24738,7 @@ pub unsafe fn _mm512_mask_unpackhi_ps(src: __m512, k: __mmask16, a: __m512, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpackhi_ps&expand=6059) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpckhps))] pub unsafe fn _mm512_maskz_unpackhi_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { let unpackhi = _mm512_unpackhi_ps(a, b).as_f32x16(); @@ -23201,6 +24751,7 @@ pub unsafe fn _mm512_maskz_unpackhi_ps(k: __mmask16, a: __m512, b: __m512) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpackhi_ps&expand=6055) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpckhps))] pub unsafe fn _mm256_mask_unpackhi_ps(src: __m256, k: 
__mmask8, a: __m256, b: __m256) -> __m256 { let unpackhi = _mm256_unpackhi_ps(a, b).as_f32x8(); @@ -23212,6 +24763,7 @@ pub unsafe fn _mm256_mask_unpackhi_ps(src: __m256, k: __mmask8, a: __m256, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpackhi_ps&expand=6056) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpckhps))] pub unsafe fn _mm256_maskz_unpackhi_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { let unpackhi = _mm256_unpackhi_ps(a, b).as_f32x8(); @@ -23224,6 +24776,7 @@ pub unsafe fn _mm256_maskz_unpackhi_ps(k: __mmask8, a: __m256, b: __m256) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpackhi_ps&expand=6052) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpckhps))] pub unsafe fn _mm_mask_unpackhi_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { let unpackhi = _mm_unpackhi_ps(a, b).as_f32x4(); @@ -23235,6 +24788,7 @@ pub unsafe fn _mm_mask_unpackhi_ps(src: __m128, k: __mmask8, a: __m128, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpackhi_ps&expand=6053) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpckhps))] pub unsafe fn _mm_maskz_unpackhi_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { let unpackhi = _mm_unpackhi_ps(a, b).as_f32x4(); @@ -23247,6 +24801,7 @@ pub unsafe fn _mm_maskz_unpackhi_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpackhi_pd&expand=6048) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpckhpd))] pub unsafe fn _mm512_unpackhi_pd(a: __m512d, b: __m512d) -> __m512d { simd_shuffle!(a, b, [1, 9, 1 + 2, 9 + 2, 1 + 4, 9 + 4, 1 + 6, 9 + 6]) @@ -23257,6 +24812,7 @@ pub unsafe fn _mm512_unpackhi_pd(a: __m512d, b: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpackhi_pd&expand=6046) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpckhpd))] pub unsafe fn _mm512_mask_unpackhi_pd( src: __m512d, @@ -23273,6 +24829,7 @@ pub unsafe fn _mm512_mask_unpackhi_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpackhi_pd&expand=6047) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpckhpd))] pub unsafe fn _mm512_maskz_unpackhi_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { let unpackhi = _mm512_unpackhi_pd(a, b).as_f64x8(); @@ -23285,6 +24842,7 @@ pub unsafe fn _mm512_maskz_unpackhi_pd(k: __mmask8, a: __m512d, b: __m512d) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpackhi_pd&expand=6043) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpckhpd))] pub unsafe fn _mm256_mask_unpackhi_pd( src: __m256d, @@ -23301,6 +24859,7 @@ pub unsafe fn _mm256_mask_unpackhi_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpackhi_pd&expand=6044) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpckhpd))] pub unsafe fn _mm256_maskz_unpackhi_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { let unpackhi = _mm256_unpackhi_pd(a, b).as_f64x4(); @@ -23313,6 +24872,7 @@ pub unsafe fn _mm256_maskz_unpackhi_pd(k: __mmask8, a: __m256d, b: __m256d) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpackhi_pd&expand=6040) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpckhpd))] pub unsafe fn _mm_mask_unpackhi_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let unpackhi = _mm_unpackhi_pd(a, b).as_f64x2(); @@ -23324,6 +24884,7 @@ pub unsafe fn _mm_mask_unpackhi_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpackhi_pd&expand=6041) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpckhpd))] pub unsafe fn _mm_maskz_unpackhi_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let unpackhi = _mm_unpackhi_pd(a, b).as_f64x2(); @@ -23336,6 +24897,7 @@ pub unsafe fn _mm_maskz_unpackhi_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpacklo_epi32&expand=6078) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpcklps))] //should be vpunpckldq pub unsafe fn _mm512_unpacklo_epi32(a: __m512i, b: __m512i) -> __m512i { let a = a.as_i32x16(); @@ -23356,6 +24918,7 @@ pub unsafe fn _mm512_unpacklo_epi32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpacklo_epi32&expand=6076) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckldq))] pub unsafe fn _mm512_mask_unpacklo_epi32( src: __m512i, @@ -23372,6 +24935,7 @@ pub unsafe fn _mm512_mask_unpacklo_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpacklo_epi32&expand=6077) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckldq))] pub unsafe fn _mm512_maskz_unpacklo_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let unpacklo = _mm512_unpacklo_epi32(a, b).as_i32x16(); @@ -23384,6 +24948,7 @@ pub unsafe fn _mm512_maskz_unpacklo_epi32(k: __mmask16, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpacklo_epi32&expand=6073) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckldq))] pub unsafe fn _mm256_mask_unpacklo_epi32( src: __m256i, @@ -23400,6 +24965,7 @@ pub unsafe fn _mm256_mask_unpacklo_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpacklo_epi32&expand=6074) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckldq))] pub unsafe fn _mm256_maskz_unpacklo_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let unpacklo = _mm256_unpacklo_epi32(a, b).as_i32x8(); @@ -23412,6 +24978,7 @@ pub unsafe fn _mm256_maskz_unpacklo_epi32(k: __mmask8, a: __m256i, b: __m256i) - /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpacklo_epi32&expand=6070) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckldq))] pub unsafe fn _mm_mask_unpacklo_epi32( src: __m128i, @@ -23428,6 +24995,7 @@ pub unsafe fn _mm_mask_unpacklo_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpacklo_epi32&expand=6071) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpckldq))] pub unsafe fn _mm_maskz_unpacklo_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let unpacklo = _mm_unpacklo_epi32(a, b).as_i32x4(); @@ -23440,6 +25008,7 @@ pub unsafe fn _mm_maskz_unpacklo_epi32(k: __mmask8, a: __m128i, b: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpacklo_epi64&expand=6087) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpcklpd))] //should be vpunpcklqdq pub unsafe fn _mm512_unpacklo_epi64(a: __m512i, b: __m512i) -> __m512i { simd_shuffle!(a, b, [0, 8, 0 + 2, 8 + 2, 0 + 4, 8 + 4, 0 + 6, 8 + 6]) @@ -23450,6 +25019,7 @@ pub unsafe fn _mm512_unpacklo_epi64(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpacklo_epi64&expand=6085) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklqdq))] pub unsafe fn _mm512_mask_unpacklo_epi64( src: __m512i, @@ -23466,6 +25036,7 @@ pub unsafe fn _mm512_mask_unpacklo_epi64( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpacklo_epi64&expand=6086) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklqdq))] pub unsafe fn _mm512_maskz_unpacklo_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let unpacklo = _mm512_unpacklo_epi64(a, b).as_i64x8(); @@ -23478,6 +25049,7 @@ pub unsafe fn _mm512_maskz_unpacklo_epi64(k: __mmask8, a: __m512i, b: __m512i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpacklo_epi64&expand=6082) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklqdq))] pub unsafe fn _mm256_mask_unpacklo_epi64( src: __m256i, @@ -23494,6 +25066,7 @@ pub unsafe fn _mm256_mask_unpacklo_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpacklo_epi64&expand=6083) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklqdq))] pub unsafe fn _mm256_maskz_unpacklo_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let unpacklo = _mm256_unpacklo_epi64(a, b).as_i64x4(); @@ -23506,6 +25079,7 @@ pub unsafe fn _mm256_maskz_unpacklo_epi64(k: __mmask8, a: __m256i, b: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpacklo_epi64&expand=6079) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklqdq))] pub unsafe fn _mm_mask_unpacklo_epi64( src: __m128i, @@ -23522,6 +25096,7 @@ pub unsafe fn _mm_mask_unpacklo_epi64( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpacklo_epi64&expand=6080) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpunpcklqdq))] pub unsafe fn _mm_maskz_unpacklo_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let unpacklo = _mm_unpacklo_epi64(a, b).as_i64x2(); @@ -23534,6 +25109,7 @@ pub unsafe fn _mm_maskz_unpacklo_epi64(k: __mmask8, a: __m128i, b: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpacklo_ps&expand=6117) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpcklps))] pub unsafe fn _mm512_unpacklo_ps(a: __m512, b: __m512) -> __m512 { #[rustfmt::skip] @@ -23550,6 +25126,7 @@ pub unsafe fn _mm512_unpacklo_ps(a: __m512, b: __m512) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpacklo_ps&expand=6115) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpcklps))] pub unsafe fn _mm512_mask_unpacklo_ps(src: __m512, k: __mmask16, a: __m512, b: __m512) -> __m512 { let unpacklo = _mm512_unpacklo_ps(a, b).as_f32x16(); @@ -23561,6 +25138,7 @@ pub unsafe fn _mm512_mask_unpacklo_ps(src: __m512, k: __mmask16, a: __m512, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpacklo_ps&expand=6116) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpcklps))] pub unsafe fn _mm512_maskz_unpacklo_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { let unpacklo = _mm512_unpacklo_ps(a, 
b).as_f32x16(); @@ -23573,6 +25151,7 @@ pub unsafe fn _mm512_maskz_unpacklo_ps(k: __mmask16, a: __m512, b: __m512) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpacklo_ps&expand=6112) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpcklps))] pub unsafe fn _mm256_mask_unpacklo_ps(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 { let unpacklo = _mm256_unpacklo_ps(a, b).as_f32x8(); @@ -23584,6 +25163,7 @@ pub unsafe fn _mm256_mask_unpacklo_ps(src: __m256, k: __mmask8, a: __m256, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpacklo_ps&expand=6113) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpcklps))] pub unsafe fn _mm256_maskz_unpacklo_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { let unpacklo = _mm256_unpacklo_ps(a, b).as_f32x8(); @@ -23596,6 +25176,7 @@ pub unsafe fn _mm256_maskz_unpacklo_ps(k: __mmask8, a: __m256, b: __m256) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpacklo_ps&expand=6109) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpcklps))] pub unsafe fn _mm_mask_unpacklo_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { let unpacklo = _mm_unpacklo_ps(a, b).as_f32x4(); @@ -23607,6 +25188,7 @@ pub unsafe fn _mm_mask_unpacklo_ps(src: __m128, k: __mmask8, a: __m128, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpacklo_ps&expand=6110) #[inline] #[target_feature(enable = "avx512f,avx512vl")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpcklps))] pub unsafe fn _mm_maskz_unpacklo_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { let unpacklo = _mm_unpacklo_ps(a, b).as_f32x4(); @@ -23619,6 +25201,7 @@ pub unsafe fn _mm_maskz_unpacklo_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpacklo_pd&expand=6105) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpcklpd))] pub unsafe fn _mm512_unpacklo_pd(a: __m512d, b: __m512d) -> __m512d { simd_shuffle!(a, b, [0, 8, 0 + 2, 8 + 2, 0 + 4, 8 + 4, 0 + 6, 8 + 6]) @@ -23629,6 +25212,7 @@ pub unsafe fn _mm512_unpacklo_pd(a: __m512d, b: __m512d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpacklo_pd&expand=6103) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpcklpd))] pub unsafe fn _mm512_mask_unpacklo_pd( src: __m512d, @@ -23645,6 +25229,7 @@ pub unsafe fn _mm512_mask_unpacklo_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpacklo_pd&expand=6104) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpcklpd))] pub unsafe fn _mm512_maskz_unpacklo_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { let unpacklo = _mm512_unpacklo_pd(a, b).as_f64x8(); @@ -23657,6 +25242,7 @@ pub unsafe fn _mm512_maskz_unpacklo_pd(k: __mmask8, a: __m512d, b: __m512d) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpacklo_pd&expand=6100) #[inline] 
#[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpcklpd))] pub unsafe fn _mm256_mask_unpacklo_pd( src: __m256d, @@ -23673,6 +25259,7 @@ pub unsafe fn _mm256_mask_unpacklo_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpacklo_pd&expand=6101) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpcklpd))] pub unsafe fn _mm256_maskz_unpacklo_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { let unpacklo = _mm256_unpacklo_pd(a, b).as_f64x4(); @@ -23685,6 +25272,7 @@ pub unsafe fn _mm256_maskz_unpacklo_pd(k: __mmask8, a: __m256d, b: __m256d) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpacklo_pd&expand=6097) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpcklpd))] pub unsafe fn _mm_mask_unpacklo_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let unpacklo = _mm_unpacklo_pd(a, b).as_f64x2(); @@ -23696,6 +25284,7 @@ pub unsafe fn _mm_mask_unpacklo_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpacklo_pd&expand=6098) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vunpcklpd))] pub unsafe fn _mm_maskz_unpacklo_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let unpacklo = _mm_unpacklo_pd(a, b).as_f64x2(); @@ -23708,6 +25297,7 @@ pub unsafe fn _mm_maskz_unpacklo_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m1 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castps128_ps512&expand=621) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castps128_ps512(a: __m128) -> __m512 { simd_shuffle!( a, @@ -23721,6 +25311,7 @@ pub unsafe fn _mm512_castps128_ps512(a: __m128) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castps256_ps512&expand=623) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castps256_ps512(a: __m256) -> __m512 { simd_shuffle!( a, @@ -23734,6 +25325,7 @@ pub unsafe fn _mm512_castps256_ps512(a: __m256) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_zextps128_ps512&expand=6196) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_zextps128_ps512(a: __m128) -> __m512 { simd_shuffle!( a, @@ -23747,6 +25339,7 @@ pub unsafe fn _mm512_zextps128_ps512(a: __m128) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_zextps256_ps512&expand=6197) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_zextps256_ps512(a: __m256) -> __m512 { simd_shuffle!( a, @@ -23760,6 +25353,7 @@ pub unsafe fn _mm512_zextps256_ps512(a: __m256) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castps512_ps128&expand=624) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castps512_ps128(a: __m512) -> __m128 { simd_shuffle!(a, a, [0, 1, 2, 3]) } @@ -23769,6 +25363,7 
@@ pub unsafe fn _mm512_castps512_ps128(a: __m512) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castps512_ps256&expand=625) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castps512_ps256(a: __m512) -> __m256 { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) } @@ -23778,6 +25373,7 @@ pub unsafe fn _mm512_castps512_ps256(a: __m512) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castps_pd&expand=616) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castps_pd(a: __m512) -> __m512d { transmute(a.as_m512()) } @@ -23787,6 +25383,7 @@ pub unsafe fn _mm512_castps_pd(a: __m512) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castps_si512&expand=619) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castps_si512(a: __m512) -> __m512i { transmute(a.as_m512()) } @@ -23796,6 +25393,7 @@ pub unsafe fn _mm512_castps_si512(a: __m512) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castpd128_pd512&expand=609) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castpd128_pd512(a: __m128d) -> __m512d { simd_shuffle!(a, _mm_set1_pd(-1.), [0, 1, 2, 2, 2, 2, 2, 2]) } @@ -23805,6 +25403,7 @@ pub unsafe fn _mm512_castpd128_pd512(a: __m128d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castpd256_pd512&expand=611) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castpd256_pd512(a: __m256d) -> __m512d { simd_shuffle!(a, _mm256_set1_pd(-1.), [0, 1, 2, 3, 4, 4, 4, 4]) } @@ -23814,6 +25413,7 @@ pub unsafe fn _mm512_castpd256_pd512(a: __m256d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_zextpd128_pd512&expand=6193) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_zextpd128_pd512(a: __m128d) -> __m512d { simd_shuffle!(a, _mm_set1_pd(0.), [0, 1, 2, 2, 2, 2, 2, 2]) } @@ -23823,6 +25423,7 @@ pub unsafe fn _mm512_zextpd128_pd512(a: __m128d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_zextpd256_pd512&expand=6194) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_zextpd256_pd512(a: __m256d) -> __m512d { simd_shuffle!(a, _mm256_set1_pd(0.), [0, 1, 2, 3, 4, 4, 4, 4]) } @@ -23832,6 +25433,7 @@ pub unsafe fn _mm512_zextpd256_pd512(a: __m256d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castpd512_pd128&expand=612) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castpd512_pd128(a: __m512d) -> __m128d { simd_shuffle!(a, a, [0, 1]) } @@ -23841,6 +25443,7 @@ pub unsafe fn _mm512_castpd512_pd128(a: __m512d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castpd512_pd256&expand=613) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castpd512_pd256(a: __m512d) -> __m256d { simd_shuffle!(a, a, [0, 1, 2, 3]) } @@ -23850,6 +25453,7 @@ pub unsafe fn 
_mm512_castpd512_pd256(a: __m512d) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castpd_ps&expand=604) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castpd_ps(a: __m512d) -> __m512 { transmute(a.as_m512d()) } @@ -23859,6 +25463,7 @@ pub unsafe fn _mm512_castpd_ps(a: __m512d) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castpd_si512&expand=607) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castpd_si512(a: __m512d) -> __m512i { transmute(a.as_m512d()) } @@ -23868,6 +25473,7 @@ pub unsafe fn _mm512_castpd_si512(a: __m512d) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castsi128_si512&expand=629) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castsi128_si512(a: __m128i) -> __m512i { simd_shuffle!(a, _mm_set1_epi64x(-1), [0, 1, 2, 2, 2, 2, 2, 2]) } @@ -23877,6 +25483,7 @@ pub unsafe fn _mm512_castsi128_si512(a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castsi256_si512&expand=633) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castsi256_si512(a: __m256i) -> __m512i { simd_shuffle!(a, _mm256_set1_epi64x(-1), [0, 1, 2, 3, 4, 4, 4, 4]) } @@ -23886,6 +25493,7 @@ pub unsafe fn _mm512_castsi256_si512(a: __m256i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_zextsi128_si512&expand=6199) #[inline] #[target_feature(enable = "avx512f")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_zextsi128_si512(a: __m128i) -> __m512i { simd_shuffle!(a, _mm_set1_epi64x(0), [0, 1, 2, 2, 2, 2, 2, 2]) } @@ -23895,6 +25503,7 @@ pub unsafe fn _mm512_zextsi128_si512(a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_zextsi256_si512&expand=6200) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_zextsi256_si512(a: __m256i) -> __m512i { simd_shuffle!(a, _mm256_set1_epi64x(0), [0, 1, 2, 3, 4, 4, 4, 4]) } @@ -23904,6 +25513,7 @@ pub unsafe fn _mm512_zextsi256_si512(a: __m256i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castsi512_si128&expand=636) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castsi512_si128(a: __m512i) -> __m128i { simd_shuffle!(a, a, [0, 1]) } @@ -23913,6 +25523,7 @@ pub unsafe fn _mm512_castsi512_si128(a: __m512i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castsi512_si256&expand=637) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castsi512_si256(a: __m512i) -> __m256i { simd_shuffle!(a, a, [0, 1, 2, 3]) } @@ -23922,6 +25533,7 @@ pub unsafe fn _mm512_castsi512_si256(a: __m512i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castsi512_ps&expand=635) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castsi512_ps(a: __m512i) -> __m512 { transmute(a) } @@ -23931,6 +25543,7 @@ pub unsafe fn _mm512_castsi512_ps(a: __m512i) -> 
__m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_castsi512_pd&expand=634) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_castsi512_pd(a: __m512i) -> __m512d { transmute(a) } @@ -23940,6 +25553,7 @@ pub unsafe fn _mm512_castsi512_pd(a: __m512i) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtsi512_si32&expand=1882) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(all(test, not(target_os = "windows")), assert_instr(vmovd))] pub unsafe fn _mm512_cvtsi512_si32(a: __m512i) -> i32 { let extract: i32 = simd_extract(a.as_i32x16(), 0); @@ -23951,6 +25565,7 @@ pub unsafe fn _mm512_cvtsi512_si32(a: __m512i) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastd_epi32&expand=545) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vbroadcast))] //should be vpbroadcastd pub unsafe fn _mm512_broadcastd_epi32(a: __m128i) -> __m512i { let a = _mm512_castsi128_si512(a).as_i32x16(); @@ -23963,6 +25578,7 @@ pub unsafe fn _mm512_broadcastd_epi32(a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcastd_epi32&expand=546) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd pub unsafe fn _mm512_mask_broadcastd_epi32(src: __m512i, k: __mmask16, a: __m128i) -> __m512i { let broadcast = _mm512_broadcastd_epi32(a).as_i32x16(); @@ -23974,6 +25590,7 @@ pub unsafe fn _mm512_mask_broadcastd_epi32(src: 
__m512i, k: __mmask16, a: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcastd_epi32&expand=547) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd pub unsafe fn _mm512_maskz_broadcastd_epi32(k: __mmask16, a: __m128i) -> __m512i { let broadcast = _mm512_broadcastd_epi32(a).as_i32x16(); @@ -23986,6 +25603,7 @@ pub unsafe fn _mm512_maskz_broadcastd_epi32(k: __mmask16, a: __m128i) -> __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_broadcastd_epi32&expand=543) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd pub unsafe fn _mm256_mask_broadcastd_epi32(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { let broadcast = _mm256_broadcastd_epi32(a).as_i32x8(); @@ -23997,6 +25615,7 @@ pub unsafe fn _mm256_mask_broadcastd_epi32(src: __m256i, k: __mmask8, a: __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_broadcastd_epi32&expand=544) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd pub unsafe fn _mm256_maskz_broadcastd_epi32(k: __mmask8, a: __m128i) -> __m256i { let broadcast = _mm256_broadcastd_epi32(a).as_i32x8(); @@ -24009,6 +25628,7 @@ pub unsafe fn _mm256_maskz_broadcastd_epi32(k: __mmask8, a: __m128i) -> __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_broadcastd_epi32&expand=540) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd pub unsafe fn _mm_mask_broadcastd_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let broadcast = _mm_broadcastd_epi32(a).as_i32x4(); @@ -24020,6 +25640,7 @@ pub unsafe fn _mm_mask_broadcastd_epi32(src: __m128i, k: __mmask8, a: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_broadcastd_epi32&expand=541) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd pub unsafe fn _mm_maskz_broadcastd_epi32(k: __mmask8, a: __m128i) -> __m128i { let broadcast = _mm_broadcastd_epi32(a).as_i32x4(); @@ -24032,6 +25653,7 @@ pub unsafe fn _mm_maskz_broadcastd_epi32(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastq_epi64&expand=560) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vbroadcas))] //should be vpbroadcastq pub unsafe fn _mm512_broadcastq_epi64(a: __m128i) -> __m512i { simd_shuffle!(a, a, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -24042,6 +25664,7 @@ pub unsafe fn _mm512_broadcastq_epi64(a: __m128i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcastq_epi64&expand=561) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq pub unsafe fn _mm512_mask_broadcastq_epi64(src: __m512i, k: __mmask8, a: __m128i) -> __m512i { let broadcast = _mm512_broadcastq_epi64(a).as_i64x8(); @@ -24053,6 +25676,7 @@ pub unsafe fn _mm512_mask_broadcastq_epi64(src: 
__m512i, k: __mmask8, a: __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcastq_epi64&expand=562) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq pub unsafe fn _mm512_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m512i { let broadcast = _mm512_broadcastq_epi64(a).as_i64x8(); @@ -24065,6 +25689,7 @@ pub unsafe fn _mm512_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_broadcastq_epi64&expand=558) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq pub unsafe fn _mm256_mask_broadcastq_epi64(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { let broadcast = _mm256_broadcastq_epi64(a).as_i64x4(); @@ -24076,6 +25701,7 @@ pub unsafe fn _mm256_mask_broadcastq_epi64(src: __m256i, k: __mmask8, a: __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_broadcastq_epi64&expand=559) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq pub unsafe fn _mm256_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m256i { let broadcast = _mm256_broadcastq_epi64(a).as_i64x4(); @@ -24088,6 +25714,7 @@ pub unsafe fn _mm256_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_broadcastq_epi64&expand=555) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq pub unsafe fn _mm_mask_broadcastq_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { let broadcast = _mm_broadcastq_epi64(a).as_i64x2(); @@ -24099,6 +25726,7 @@ pub unsafe fn _mm_mask_broadcastq_epi64(src: __m128i, k: __mmask8, a: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_broadcastq_epi64&expand=556) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq pub unsafe fn _mm_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m128i { let broadcast = _mm_broadcastq_epi64(a).as_i64x2(); @@ -24111,6 +25739,7 @@ pub unsafe fn _mm_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastss_ps&expand=578) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vbroadcastss))] pub unsafe fn _mm512_broadcastss_ps(a: __m128) -> __m512 { simd_shuffle!(a, a, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) @@ -24121,6 +25750,7 @@ pub unsafe fn _mm512_broadcastss_ps(a: __m128) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcastss_ps&expand=579) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vbroadcastss))] pub unsafe fn _mm512_mask_broadcastss_ps(src: __m512, k: __mmask16, a: __m128) -> __m512 { let broadcast = _mm512_broadcastss_ps(a).as_f32x16(); @@ -24132,6 +25762,7 @@ pub unsafe fn _mm512_mask_broadcastss_ps(src: __m512, k: __mmask16, a: __m128) - /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcastss_ps&expand=580) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vbroadcastss))] pub unsafe fn _mm512_maskz_broadcastss_ps(k: __mmask16, a: __m128) -> __m512 { let broadcast = _mm512_broadcastss_ps(a).as_f32x16(); @@ -24144,6 +25775,7 @@ pub unsafe fn _mm512_maskz_broadcastss_ps(k: __mmask16, a: __m128) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_broadcastss_ps&expand=576) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vbroadcastss))] pub unsafe fn _mm256_mask_broadcastss_ps(src: __m256, k: __mmask8, a: __m128) -> __m256 { let broadcast = _mm256_broadcastss_ps(a).as_f32x8(); @@ -24155,6 +25787,7 @@ pub unsafe fn _mm256_mask_broadcastss_ps(src: __m256, k: __mmask8, a: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_broadcastss_ps&expand=577) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vbroadcastss))] pub unsafe fn _mm256_maskz_broadcastss_ps(k: __mmask8, a: __m128) -> __m256 { let broadcast = _mm256_broadcastss_ps(a).as_f32x8(); @@ -24167,6 +25800,7 @@ pub unsafe fn _mm256_maskz_broadcastss_ps(k: __mmask8, a: __m128) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_broadcastss_ps&expand=573) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vbroadcastss))] pub unsafe fn _mm_mask_broadcastss_ps(src: __m128, 
k: __mmask8, a: __m128) -> __m128 { let broadcast = _mm_broadcastss_ps(a).as_f32x4(); @@ -24178,6 +25812,7 @@ pub unsafe fn _mm_mask_broadcastss_ps(src: __m128, k: __mmask8, a: __m128) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_broadcastss_ps&expand=574) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vbroadcastss))] pub unsafe fn _mm_maskz_broadcastss_ps(k: __mmask8, a: __m128) -> __m128 { let broadcast = _mm_broadcastss_ps(a).as_f32x4(); @@ -24190,6 +25825,7 @@ pub unsafe fn _mm_maskz_broadcastss_ps(k: __mmask8, a: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastsd_pd&expand=567) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vbroadcastsd))] pub unsafe fn _mm512_broadcastsd_pd(a: __m128d) -> __m512d { simd_shuffle!(a, a, [0, 0, 0, 0, 0, 0, 0, 0]) @@ -24200,6 +25836,7 @@ pub unsafe fn _mm512_broadcastsd_pd(a: __m128d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcastsd_pd&expand=568) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vbroadcastsd))] pub unsafe fn _mm512_mask_broadcastsd_pd(src: __m512d, k: __mmask8, a: __m128d) -> __m512d { let broadcast = _mm512_broadcastsd_pd(a).as_f64x8(); @@ -24211,6 +25848,7 @@ pub unsafe fn _mm512_mask_broadcastsd_pd(src: __m512d, k: __mmask8, a: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcastsd_pd&expand=569) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vbroadcastsd))] pub unsafe fn _mm512_maskz_broadcastsd_pd(k: __mmask8, a: __m128d) -> __m512d { let broadcast = _mm512_broadcastsd_pd(a).as_f64x8(); @@ -24223,6 +25861,7 @@ pub unsafe fn _mm512_maskz_broadcastsd_pd(k: __mmask8, a: __m128d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_broadcastsd_pd&expand=565) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vbroadcastsd))] pub unsafe fn _mm256_mask_broadcastsd_pd(src: __m256d, k: __mmask8, a: __m128d) -> __m256d { let broadcast = _mm256_broadcastsd_pd(a).as_f64x4(); @@ -24234,6 +25873,7 @@ pub unsafe fn _mm256_mask_broadcastsd_pd(src: __m256d, k: __mmask8, a: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_broadcastsd_pd&expand=566) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vbroadcastsd))] pub unsafe fn _mm256_maskz_broadcastsd_pd(k: __mmask8, a: __m128d) -> __m256d { let broadcast = _mm256_broadcastsd_pd(a).as_f64x4(); @@ -24246,6 +25886,7 @@ pub unsafe fn _mm256_maskz_broadcastsd_pd(k: __mmask8, a: __m128d) -> __m256d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcast_i32x4&expand=510) #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcasti32x4, linux: vshuf +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_broadcast_i32x4(a: __m128i) -> __m512i { let a = a.as_i32x4(); let ret: i32x16 = simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]); @@ -24257,6 +25898,7 @@ pub unsafe fn _mm512_broadcast_i32x4(a: __m128i) -> __m512i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcast_i32x4&expand=511) #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcasti32x4, linux: vshuf +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_broadcast_i32x4(src: __m512i, k: __mmask16, a: __m128i) -> __m512i { let broadcast = _mm512_broadcast_i32x4(a).as_i32x16(); transmute(simd_select_bitmask(k, broadcast, src.as_i32x16())) @@ -24267,6 +25909,7 @@ pub unsafe fn _mm512_mask_broadcast_i32x4(src: __m512i, k: __mmask16, a: __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcast_i32x4&expand=512) #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcasti32x4, linux: vshuf +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_broadcast_i32x4(k: __mmask16, a: __m128i) -> __m512i { let broadcast = _mm512_broadcast_i32x4(a).as_i32x16(); let zero = _mm512_setzero_si512().as_i32x16(); @@ -24278,6 +25921,7 @@ pub unsafe fn _mm512_maskz_broadcast_i32x4(k: __mmask16, a: __m128i) -> __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcast_i32x4&expand=507) #[inline] #[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcasti32x4, linux: vshuf +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_broadcast_i32x4(a: __m128i) -> __m256i { let a = a.as_i32x4(); let ret: i32x8 = simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3]); @@ -24289,6 +25933,7 @@ pub unsafe fn _mm256_broadcast_i32x4(a: __m128i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_broadcast_i32x4&expand=508) #[inline] #[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcasti32x4, linux: vshuf +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_broadcast_i32x4(src: __m256i, k: __mmask8, a: __m128i) -> __m256i { let broadcast = _mm256_broadcast_i32x4(a).as_i32x8(); transmute(simd_select_bitmask(k, broadcast, src.as_i32x8())) @@ -24299,6 +25944,7 @@ pub unsafe fn _mm256_mask_broadcast_i32x4(src: __m256i, k: __mmask8, a: __m128i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_broadcast_i32x4&expand=509) #[inline] #[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcasti32x4, linux: vshuf +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_broadcast_i32x4(k: __mmask8, a: __m128i) -> __m256i { let broadcast = _mm256_broadcast_i32x4(a).as_i32x8(); let zero = _mm256_setzero_si256().as_i32x8(); @@ -24310,6 +25956,7 @@ pub unsafe fn _mm256_maskz_broadcast_i32x4(k: __mmask8, a: __m128i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_broadcast_i64x4&expand=522) #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcasti64x4, linux: vperm +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_broadcast_i64x4(a: __m256i) -> __m512i { simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3]) } @@ -24319,6 +25966,7 @@ pub unsafe fn _mm512_broadcast_i64x4(a: __m256i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_mask_broadcast_i64x4&expand=523) #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcasti64x4, linux: vperm +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_broadcast_i64x4(src: __m512i, k: __mmask8, a: __m256i) -> __m512i { let broadcast = _mm512_broadcast_i64x4(a).as_i64x8(); transmute(simd_select_bitmask(k, broadcast, src.as_i64x8())) @@ -24329,6 +25977,7 @@ pub unsafe fn 
_mm512_mask_broadcast_i64x4(src: __m512i, k: __mmask8, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_maskz_broadcast_i64x4&expand=524) #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcasti64x4, linux: vperm +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_broadcast_i64x4(k: __mmask8, a: __m256i) -> __m512i { let broadcast = _mm512_broadcast_i64x4(a).as_i64x8(); let zero = _mm512_setzero_si512().as_i64x8(); @@ -24340,6 +25989,7 @@ pub unsafe fn _mm512_maskz_broadcast_i64x4(k: __mmask8, a: __m256i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcast_f32x4&expand=483) #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcastf32x4, linux: vshuf +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_broadcast_f32x4(a: __m128) -> __m512 { simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]) } @@ -24349,6 +25999,7 @@ pub unsafe fn _mm512_broadcast_f32x4(a: __m128) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcast_f32x4&expand=484) #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcastf32x4, linux: vshu +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_broadcast_f32x4(src: __m512, k: __mmask16, a: __m128) -> __m512 { let broadcast = _mm512_broadcast_f32x4(a).as_f32x16(); transmute(simd_select_bitmask(k, broadcast, src.as_f32x16())) @@ -24359,6 +26010,7 @@ pub unsafe fn _mm512_mask_broadcast_f32x4(src: __m512, k: __mmask16, a: __m128) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcast_f32x4&expand=485) #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcastf32x4, linux: vshu 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_broadcast_f32x4(k: __mmask16, a: __m128) -> __m512 { let broadcast = _mm512_broadcast_f32x4(a).as_f32x16(); let zero = _mm512_setzero_ps().as_f32x16(); @@ -24370,6 +26022,7 @@ pub unsafe fn _mm512_maskz_broadcast_f32x4(k: __mmask16, a: __m128) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcast_f32x4&expand=480) #[inline] #[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcastf32x4, linux: vshuf +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_broadcast_f32x4(a: __m128) -> __m256 { simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3]) } @@ -24379,6 +26032,7 @@ pub unsafe fn _mm256_broadcast_f32x4(a: __m128) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_broadcast_f32x4&expand=481) #[inline] #[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcastf32x4, linux: vshu +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_broadcast_f32x4(src: __m256, k: __mmask8, a: __m128) -> __m256 { let broadcast = _mm256_broadcast_f32x4(a).as_f32x8(); transmute(simd_select_bitmask(k, broadcast, src.as_f32x8())) @@ -24389,6 +26043,7 @@ pub unsafe fn _mm256_mask_broadcast_f32x4(src: __m256, k: __mmask8, a: __m128) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_broadcast_f32x4&expand=482) #[inline] #[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcastf32x4, linux: vshu +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_broadcast_f32x4(k: __mmask8, a: __m128) -> __m256 { let broadcast = _mm256_broadcast_f32x4(a).as_f32x8(); let zero = _mm256_setzero_ps().as_f32x8(); @@ -24400,6 +26055,7 @@ pub unsafe fn 
_mm256_maskz_broadcast_f32x4(k: __mmask8, a: __m128) -> __m256 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_broadcast_f64x4&expand=495) #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcastf64x4, linux: vperm +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_broadcast_f64x4(a: __m256d) -> __m512d { simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3]) } @@ -24409,6 +26065,7 @@ pub unsafe fn _mm512_broadcast_f64x4(a: __m256d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_mask_broadcast_f64x4&expand=496) #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcastf64x4, linux: vper +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_broadcast_f64x4(src: __m512d, k: __mmask8, a: __m256d) -> __m512d { let broadcast = _mm512_broadcast_f64x4(a).as_f64x8(); transmute(simd_select_bitmask(k, broadcast, src.as_f64x8())) @@ -24419,6 +26076,7 @@ pub unsafe fn _mm512_mask_broadcast_f64x4(src: __m512d, k: __mmask8, a: __m256d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_maskz_broadcast_f64x4&expand=497) #[inline] #[target_feature(enable = "avx512f")] //msvc: vbroadcastf64x4, linux: vper +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_broadcast_f64x4(k: __mmask8, a: __m256d) -> __m512d { let broadcast = _mm512_broadcast_f64x4(a).as_f64x8(); let zero = _mm512_setzero_pd().as_f64x8(); @@ -24430,6 +26088,7 @@ pub unsafe fn _mm512_maskz_broadcast_f64x4(k: __mmask8, a: __m256d) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_blend_epi32&expand=435) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vmovdqa32))] //should be vpblendmd pub unsafe fn _mm512_mask_blend_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { transmute(simd_select_bitmask(k, b.as_i32x16(), a.as_i32x16())) @@ -24440,6 +26099,7 @@ pub unsafe fn _mm512_mask_blend_epi32(k: __mmask16, a: __m512i, b: __m512i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_blend_epi32&expand=434) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa32))] //should be vpblendmd pub unsafe fn _mm256_mask_blend_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { transmute(simd_select_bitmask(k, b.as_i32x8(), a.as_i32x8())) @@ -24450,6 +26110,7 @@ pub unsafe fn _mm256_mask_blend_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_blend_epi32&expand=432) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa32))] //should be vpblendmd pub unsafe fn _mm_mask_blend_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { transmute(simd_select_bitmask(k, b.as_i32x4(), a.as_i32x4())) @@ -24460,6 +26121,7 @@ pub unsafe fn _mm_mask_blend_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_blend_epi64&expand=438) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa64))] //should be vpblendmq pub unsafe fn _mm512_mask_blend_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { transmute(simd_select_bitmask(k, b.as_i64x8(), a.as_i64x8())) @@ -24470,6 +26132,7 @@ pub unsafe fn _mm512_mask_blend_epi64(k: 
__mmask8, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_blend_epi64&expand=437) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa64))] //should be vpblendmq pub unsafe fn _mm256_mask_blend_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { transmute(simd_select_bitmask(k, b.as_i64x4(), a.as_i64x4())) @@ -24480,6 +26143,7 @@ pub unsafe fn _mm256_mask_blend_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_blend_epi64&expand=436) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovdqa64))] //should be vpblendmq pub unsafe fn _mm_mask_blend_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { transmute(simd_select_bitmask(k, b.as_i64x2(), a.as_i64x2())) @@ -24490,6 +26154,7 @@ pub unsafe fn _mm_mask_blend_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_blend_ps&expand=451) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vpblendmps pub unsafe fn _mm512_mask_blend_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 { transmute(simd_select_bitmask(k, b.as_f32x16(), a.as_f32x16())) @@ -24500,6 +26165,7 @@ pub unsafe fn _mm512_mask_blend_ps(k: __mmask16, a: __m512, b: __m512) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_blend_ps&expand=450) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vpblendmps pub unsafe fn _mm256_mask_blend_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 { transmute(simd_select_bitmask(k, b.as_f32x8(), a.as_f32x8())) @@ -24510,6 +26176,7 @@ pub unsafe fn _mm256_mask_blend_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_blend_ps&expand=448) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vpblendmps pub unsafe fn _mm_mask_blend_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { transmute(simd_select_bitmask(k, b.as_f32x4(), a.as_f32x4())) @@ -24520,6 +26187,7 @@ pub unsafe fn _mm_mask_blend_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_blend_pd&expand=446) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovapd))] //should be vpblendmpd pub unsafe fn _mm512_mask_blend_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512d { transmute(simd_select_bitmask(k, b.as_f64x8(), a.as_f64x8())) @@ -24530,6 +26198,7 @@ pub unsafe fn _mm512_mask_blend_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_blend_pd&expand=445) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovapd))] //should be vpblendmpd pub unsafe fn _mm256_mask_blend_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256d { transmute(simd_select_bitmask(k, b.as_f64x4(), a.as_f64x4())) @@ -24540,6 +26209,7 @@ pub unsafe fn 
_mm256_mask_blend_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_blend_pd&expand=443) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovapd))] //should be vpblendmpd pub unsafe fn _mm_mask_blend_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { transmute(simd_select_bitmask(k, b.as_f64x2(), a.as_f64x2())) @@ -24550,6 +26220,7 @@ pub unsafe fn _mm_mask_blend_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_alignr_epi32&expand=245) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_alignr_epi32(a: __m512i, b: __m512i) -> __m512i { @@ -24623,6 +26294,7 @@ pub unsafe fn _mm512_alignr_epi32(a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_mask_alignr_epi32&expand=246) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_alignr_epi32( @@ -24641,6 +26313,7 @@ pub unsafe fn _mm512_mask_alignr_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_maskz_alignr_epi32&expand=247) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_alignr_epi32( @@ -24659,6 +26332,7 @@ pub unsafe fn 
_mm512_maskz_alignr_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_alignr_epi32&expand=242) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_alignr_epi32(a: __m256i, b: __m256i) -> __m256i { @@ -24692,6 +26366,7 @@ pub unsafe fn _mm256_alignr_epi32(a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_alignr_epi32&expand=243) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_alignr_epi32( @@ -24710,6 +26385,7 @@ pub unsafe fn _mm256_mask_alignr_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_alignr_epi32&expand=244) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_alignr_epi32( @@ -24728,6 +26404,7 @@ pub unsafe fn _mm256_maskz_alignr_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_alignr_epi32&expand=239) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))] //should be valignd #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_alignr_epi32(a: __m128i, b: __m128i) -> __m128i { @@ -24753,6 +26430,7 @@ pub unsafe fn _mm_alignr_epi32(a: __m128i, b: __m128i) -> __m12 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_alignr_epi32&expand=240) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_alignr_epi32( @@ -24771,6 +26449,7 @@ pub unsafe fn _mm_mask_alignr_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_alignr_epi32&expand=241) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(valignd, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_alignr_epi32( @@ -24789,6 +26468,7 @@ pub unsafe fn _mm_maskz_alignr_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_alignr_epi64&expand=254) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_alignr_epi64(a: __m512i, b: __m512i) -> __m512i { @@ -24812,6 +26492,7 @@ pub unsafe fn _mm512_alignr_epi64(a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_mask_alignr_epi64&expand=255) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_alignr_epi64( @@ -24830,6 +26511,7 @@ pub unsafe fn _mm512_mask_alignr_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_maskz_alignr_epi64&expand=256) #[inline] #[target_feature(enable = "avx512f")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_alignr_epi64( @@ -24848,6 +26530,7 @@ pub unsafe fn _mm512_maskz_alignr_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_alignr_epi64&expand=251) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_alignr_epi64(a: __m256i, b: __m256i) -> __m256i { @@ -24871,6 +26554,7 @@ pub unsafe fn _mm256_alignr_epi64(a: __m256i, b: __m256i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_alignr_epi64&expand=252) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_alignr_epi64( @@ -24889,6 +26573,7 @@ pub unsafe fn _mm256_mask_alignr_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_alignr_epi64&expand=253) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_alignr_epi64( @@ -24907,6 +26592,7 @@ pub unsafe fn _mm256_maskz_alignr_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_alignr_epi64&expand=248) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))] //should be valignq 
#[rustc_legacy_const_generics(2)] pub unsafe fn _mm_alignr_epi64(a: __m128i, b: __m128i) -> __m128i { @@ -24926,6 +26612,7 @@ pub unsafe fn _mm_alignr_epi64(a: __m128i, b: __m128i) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_alignr_epi64&expand=249) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_alignr_epi64( @@ -24944,6 +26631,7 @@ pub unsafe fn _mm_mask_alignr_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_alignr_epi64&expand=250) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(valignq, IMM8 = 1))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_alignr_epi64( @@ -24962,6 +26650,7 @@ pub unsafe fn _mm_maskz_alignr_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_and_epi32&expand=272) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandq))] //should be vpandd, but generate vpandq pub unsafe fn _mm512_and_epi32(a: __m512i, b: __m512i) -> __m512i { transmute(simd_and(a.as_i32x16(), b.as_i32x16())) @@ -24972,6 +26661,7 @@ pub unsafe fn _mm512_and_epi32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_and_epi32&expand=273) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandd))] pub unsafe fn _mm512_mask_and_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> 
__m512i { let and = _mm512_and_epi32(a, b).as_i32x16(); @@ -24983,6 +26673,7 @@ pub unsafe fn _mm512_mask_and_epi32(src: __m512i, k: __mmask16, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_and_epi32&expand=274) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandd))] pub unsafe fn _mm512_maskz_and_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let and = _mm512_and_epi32(a, b).as_i32x16(); @@ -24995,6 +26686,7 @@ pub unsafe fn _mm512_maskz_and_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_and_epi32&expand=270) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandd))] pub unsafe fn _mm256_mask_and_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let and = simd_and(a.as_i32x8(), b.as_i32x8()); @@ -25006,6 +26698,7 @@ pub unsafe fn _mm256_mask_and_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_and_epi32&expand=271) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandd))] pub unsafe fn _mm256_maskz_and_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let and = simd_and(a.as_i32x8(), b.as_i32x8()); @@ -25018,6 +26711,7 @@ pub unsafe fn _mm256_maskz_and_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_and_epi32&expand=268) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature 
= "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandd))] pub unsafe fn _mm_mask_and_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let and = simd_and(a.as_i32x4(), b.as_i32x4()); @@ -25029,6 +26723,7 @@ pub unsafe fn _mm_mask_and_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_and_epi32&expand=269) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandd))] pub unsafe fn _mm_maskz_and_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let and = simd_and(a.as_i32x4(), b.as_i32x4()); @@ -25041,6 +26736,7 @@ pub unsafe fn _mm_maskz_and_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_and_epi64&expand=279) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandq))] pub unsafe fn _mm512_and_epi64(a: __m512i, b: __m512i) -> __m512i { transmute(simd_and(a.as_i64x8(), b.as_i64x8())) @@ -25051,6 +26747,7 @@ pub unsafe fn _mm512_and_epi64(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_and_epi64&expand=280) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandq))] pub unsafe fn _mm512_mask_and_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let and = _mm512_and_epi64(a, b).as_i64x8(); @@ -25062,6 +26759,7 @@ pub unsafe fn _mm512_mask_and_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_and_epi64&expand=281) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandq))] pub unsafe fn _mm512_maskz_and_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let and = _mm512_and_epi64(a, b).as_i64x8(); @@ -25074,6 +26772,7 @@ pub unsafe fn _mm512_maskz_and_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_and_epi64&expand=277) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandq))] pub unsafe fn _mm256_mask_and_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let and = simd_and(a.as_i64x4(), b.as_i64x4()); @@ -25085,6 +26784,7 @@ pub unsafe fn _mm256_mask_and_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_and_epi64&expand=278) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandq))] pub unsafe fn _mm256_maskz_and_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let and = simd_and(a.as_i64x4(), b.as_i64x4()); @@ -25097,6 +26797,7 @@ pub unsafe fn _mm256_maskz_and_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_and_epi64&expand=275) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandq))] pub unsafe fn _mm_mask_and_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> 
__m128i { let and = simd_and(a.as_i64x2(), b.as_i64x2()); @@ -25108,6 +26809,7 @@ pub unsafe fn _mm_mask_and_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_and_epi64&expand=276) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandq))] pub unsafe fn _mm_maskz_and_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let and = simd_and(a.as_i64x2(), b.as_i64x2()); @@ -25120,6 +26822,7 @@ pub unsafe fn _mm_maskz_and_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_and_si512&expand=302) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandq))] pub unsafe fn _mm512_and_si512(a: __m512i, b: __m512i) -> __m512i { transmute(simd_and(a.as_i32x16(), b.as_i32x16())) @@ -25130,6 +26833,7 @@ pub unsafe fn _mm512_and_si512(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_or_epi32&expand=4042) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vporq))] pub unsafe fn _mm512_or_epi32(a: __m512i, b: __m512i) -> __m512i { transmute(simd_or(a.as_i32x16(), b.as_i32x16())) @@ -25140,6 +26844,7 @@ pub unsafe fn _mm512_or_epi32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_or_epi32&expand=4040) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpord))] pub unsafe fn 
_mm512_mask_or_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let or = _mm512_or_epi32(a, b).as_i32x16(); @@ -25151,6 +26856,7 @@ pub unsafe fn _mm512_mask_or_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_or_epi32&expand=4041) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpord))] pub unsafe fn _mm512_maskz_or_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let or = _mm512_or_epi32(a, b).as_i32x16(); @@ -25163,6 +26869,7 @@ pub unsafe fn _mm512_maskz_or_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_or_epi32&expand=4039) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vor))] //should be vpord pub unsafe fn _mm256_or_epi32(a: __m256i, b: __m256i) -> __m256i { transmute(simd_or(a.as_i32x8(), b.as_i32x8())) @@ -25173,6 +26880,7 @@ pub unsafe fn _mm256_or_epi32(a: __m256i, b: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_or_epi32&expand=4037) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpord))] pub unsafe fn _mm256_mask_or_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let or = _mm256_or_epi32(a, b).as_i32x8(); @@ -25184,6 +26892,7 @@ pub unsafe fn _mm256_mask_or_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_or_epi32&expand=4038) #[inline] #[target_feature(enable = 
"avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpord))] pub unsafe fn _mm256_maskz_or_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let or = _mm256_or_epi32(a, b).as_i32x8(); @@ -25196,6 +26905,7 @@ pub unsafe fn _mm256_maskz_or_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_or_epi32&expand=4036) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vor))] //should be vpord pub unsafe fn _mm_or_epi32(a: __m128i, b: __m128i) -> __m128i { transmute(simd_or(a.as_i32x4(), b.as_i32x4())) @@ -25206,6 +26916,7 @@ pub unsafe fn _mm_or_epi32(a: __m128i, b: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_or_epi32&expand=4034) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpord))] pub unsafe fn _mm_mask_or_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let or = _mm_or_epi32(a, b).as_i32x4(); @@ -25217,6 +26928,7 @@ pub unsafe fn _mm_mask_or_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_or_epi32&expand=4035) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpord))] pub unsafe fn _mm_maskz_or_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let or = _mm_or_epi32(a, b).as_i32x4(); @@ -25229,6 +26941,7 @@ pub unsafe fn _mm_maskz_or_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_or_epi64&expand=4051) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vporq))] pub unsafe fn _mm512_or_epi64(a: __m512i, b: __m512i) -> __m512i { transmute(simd_or(a.as_i64x8(), b.as_i64x8())) @@ -25239,6 +26952,7 @@ pub unsafe fn _mm512_or_epi64(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_or_epi64&expand=4049) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vporq))] pub unsafe fn _mm512_mask_or_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let or = _mm512_or_epi64(a, b).as_i64x8(); @@ -25250,6 +26964,7 @@ pub unsafe fn _mm512_mask_or_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_or_epi64&expand=4050) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vporq))] pub unsafe fn _mm512_maskz_or_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let or = _mm512_or_epi64(a, b).as_i64x8(); @@ -25262,6 +26977,7 @@ pub unsafe fn _mm512_maskz_or_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_or_epi64&expand=4048) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vor))] //should be vporq pub unsafe fn _mm256_or_epi64(a: __m256i, b: __m256i) -> __m256i { transmute(simd_or(a.as_i64x4(), b.as_i64x4())) @@ -25272,6 +26988,7 @@ pub unsafe 
fn _mm256_or_epi64(a: __m256i, b: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_or_epi64&expand=4046) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vporq))] pub unsafe fn _mm256_mask_or_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let or = _mm256_or_epi64(a, b).as_i64x4(); @@ -25283,6 +27000,7 @@ pub unsafe fn _mm256_mask_or_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_or_epi64&expand=4047) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vporq))] pub unsafe fn _mm256_maskz_or_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let or = _mm256_or_epi64(a, b).as_i64x4(); @@ -25295,6 +27013,7 @@ pub unsafe fn _mm256_maskz_or_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_or_epi64&expand=4045) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vor))] //should be vporq pub unsafe fn _mm_or_epi64(a: __m128i, b: __m128i) -> __m128i { transmute(simd_or(a.as_i64x2(), b.as_i64x2())) @@ -25305,6 +27024,7 @@ pub unsafe fn _mm_or_epi64(a: __m128i, b: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_or_epi64&expand=4043) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vporq))] pub unsafe fn _mm_mask_or_epi64(src: __m128i, k: __mmask8, a: 
__m128i, b: __m128i) -> __m128i { let or = _mm_or_epi64(a, b).as_i64x2(); @@ -25316,6 +27036,7 @@ pub unsafe fn _mm_mask_or_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_or_epi64&expand=4044) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vporq))] pub unsafe fn _mm_maskz_or_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let or = _mm_or_epi64(a, b).as_i64x2(); @@ -25328,6 +27049,7 @@ pub unsafe fn _mm_maskz_or_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_or_si512&expand=4072) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vporq))] pub unsafe fn _mm512_or_si512(a: __m512i, b: __m512i) -> __m512i { transmute(simd_or(a.as_i32x16(), b.as_i32x16())) @@ -25338,6 +27060,7 @@ pub unsafe fn _mm512_or_si512(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_xor_epi32&expand=6142) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpxorq))] //should be vpxord pub unsafe fn _mm512_xor_epi32(a: __m512i, b: __m512i) -> __m512i { transmute(simd_xor(a.as_i32x16(), b.as_i32x16())) @@ -25348,6 +27071,7 @@ pub unsafe fn _mm512_xor_epi32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_xor_epi32&expand=6140) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpxord))] pub unsafe fn _mm512_mask_xor_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let xor = _mm512_xor_epi32(a, b).as_i32x16(); @@ -25359,6 +27083,7 @@ pub unsafe fn _mm512_mask_xor_epi32(src: __m512i, k: __mmask16, a: __m512i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_xor_epi32&expand=6141) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpxord))] pub unsafe fn _mm512_maskz_xor_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let xor = _mm512_xor_epi32(a, b).as_i32x16(); @@ -25371,6 +27096,7 @@ pub unsafe fn _mm512_maskz_xor_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_xor_epi32&expand=6139) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vxor))] //should be vpxord pub unsafe fn _mm256_xor_epi32(a: __m256i, b: __m256i) -> __m256i { transmute(simd_xor(a.as_i32x8(), b.as_i32x8())) @@ -25381,6 +27107,7 @@ pub unsafe fn _mm256_xor_epi32(a: __m256i, b: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_xor_epi32&expand=6137) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpxord))] pub unsafe fn _mm256_mask_xor_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let xor = _mm256_xor_epi32(a, b).as_i32x8(); @@ -25392,6 +27119,7 @@ pub unsafe fn _mm256_mask_xor_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_xor_epi32&expand=6138) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpxord))] pub unsafe fn _mm256_maskz_xor_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let xor = _mm256_xor_epi32(a, b).as_i32x8(); @@ -25404,6 +27132,7 @@ pub unsafe fn _mm256_maskz_xor_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_epi32&expand=6136) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vxor))] //should be vpxord pub unsafe fn _mm_xor_epi32(a: __m128i, b: __m128i) -> __m128i { transmute(simd_xor(a.as_i32x4(), b.as_i32x4())) @@ -25414,6 +27143,7 @@ pub unsafe fn _mm_xor_epi32(a: __m128i, b: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_xor_epi32&expand=6134) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpxord))] pub unsafe fn _mm_mask_xor_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let xor = _mm_xor_epi32(a, b).as_i32x4(); @@ -25425,6 +27155,7 @@ pub unsafe fn _mm_mask_xor_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_xor_epi32&expand=6135) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpxord))] pub unsafe fn _mm_maskz_xor_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let xor = _mm_xor_epi32(a, 
b).as_i32x4(); @@ -25437,6 +27168,7 @@ pub unsafe fn _mm_maskz_xor_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_xor_epi64&expand=6151) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpxorq))] pub unsafe fn _mm512_xor_epi64(a: __m512i, b: __m512i) -> __m512i { transmute(simd_xor(a.as_i64x8(), b.as_i64x8())) @@ -25447,6 +27179,7 @@ pub unsafe fn _mm512_xor_epi64(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_xor_epi64&expand=6149) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpxorq))] pub unsafe fn _mm512_mask_xor_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let xor = _mm512_xor_epi64(a, b).as_i64x8(); @@ -25458,6 +27191,7 @@ pub unsafe fn _mm512_mask_xor_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_xor_epi64&expand=6150) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpxorq))] pub unsafe fn _mm512_maskz_xor_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let xor = _mm512_xor_epi64(a, b).as_i64x8(); @@ -25470,6 +27204,7 @@ pub unsafe fn _mm512_maskz_xor_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_xor_epi64&expand=6148) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vxor))] //should be vpxorq 
pub unsafe fn _mm256_xor_epi64(a: __m256i, b: __m256i) -> __m256i { transmute(simd_xor(a.as_i64x4(), b.as_i64x4())) @@ -25480,6 +27215,7 @@ pub unsafe fn _mm256_xor_epi64(a: __m256i, b: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_xor_epi64&expand=6146) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpxorq))] pub unsafe fn _mm256_mask_xor_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let xor = _mm256_xor_epi64(a, b).as_i64x4(); @@ -25491,6 +27227,7 @@ pub unsafe fn _mm256_mask_xor_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_xor_epi64&expand=6147) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpxorq))] pub unsafe fn _mm256_maskz_xor_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let xor = _mm256_xor_epi64(a, b).as_i64x4(); @@ -25503,6 +27240,7 @@ pub unsafe fn _mm256_maskz_xor_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_epi64&expand=6145) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vxor))] //should be vpxorq pub unsafe fn _mm_xor_epi64(a: __m128i, b: __m128i) -> __m128i { transmute(simd_xor(a.as_i64x2(), b.as_i64x2())) @@ -25513,6 +27251,7 @@ pub unsafe fn _mm_xor_epi64(a: __m128i, b: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_xor_epi64&expand=6143) #[inline] #[target_feature(enable = 
"avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpxorq))] pub unsafe fn _mm_mask_xor_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let xor = _mm_xor_epi64(a, b).as_i64x2(); @@ -25524,6 +27263,7 @@ pub unsafe fn _mm_mask_xor_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_xor_epi64&expand=6144) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpxorq))] pub unsafe fn _mm_maskz_xor_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let xor = _mm_xor_epi64(a, b).as_i64x2(); @@ -25536,6 +27276,7 @@ pub unsafe fn _mm_maskz_xor_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_xor_si512&expand=6172) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpxorq))] pub unsafe fn _mm512_xor_si512(a: __m512i, b: __m512i) -> __m512i { transmute(simd_xor(a.as_i32x16(), b.as_i32x16())) @@ -25546,6 +27287,7 @@ pub unsafe fn _mm512_xor_si512(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_andnot_epi32&expand=310) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandnq))] //should be vpandnd pub unsafe fn _mm512_andnot_epi32(a: __m512i, b: __m512i) -> __m512i { _mm512_and_epi32(_mm512_xor_epi32(a, _mm512_set1_epi32(u32::MAX as i32)), b) @@ -25556,6 +27298,7 @@ pub unsafe fn _mm512_andnot_epi32(a: __m512i, b: __m512i) -> __m512i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_andnot_epi32&expand=311) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandnd))] pub unsafe fn _mm512_mask_andnot_epi32( src: __m512i, @@ -25572,6 +27315,7 @@ pub unsafe fn _mm512_mask_andnot_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_andnot_epi32&expand=312) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandnd))] pub unsafe fn _mm512_maskz_andnot_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m512i { let andnot = _mm512_andnot_epi32(a, b).as_i32x16(); @@ -25584,6 +27328,7 @@ pub unsafe fn _mm512_maskz_andnot_epi32(k: __mmask16, a: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_andnot_epi32&expand=308) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandnd))] pub unsafe fn _mm256_mask_andnot_epi32( src: __m256i, @@ -25601,6 +27346,7 @@ pub unsafe fn _mm256_mask_andnot_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_andnot_epi32&expand=309) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandnd))] pub unsafe fn _mm256_maskz_andnot_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let not = _mm256_xor_epi32(a, _mm256_set1_epi32(u32::MAX as i32)); @@ -25614,6 +27360,7 @@ pub unsafe fn _mm256_maskz_andnot_epi32(k: __mmask8, a: __m256i, b: __m256i) -> /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_andnot_epi32&expand=306) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandnd))] pub unsafe fn _mm_mask_andnot_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let not = _mm_xor_epi32(a, _mm_set1_epi32(u32::MAX as i32)); @@ -25626,6 +27373,7 @@ pub unsafe fn _mm_mask_andnot_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_andnot_epi32&expand=307) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandnd))] pub unsafe fn _mm_maskz_andnot_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let not = _mm_xor_epi32(a, _mm_set1_epi32(u32::MAX as i32)); @@ -25639,6 +27387,7 @@ pub unsafe fn _mm_maskz_andnot_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_andnot_epi64&expand=317) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandnq))] //should be vpandnd pub unsafe fn _mm512_andnot_epi64(a: __m512i, b: __m512i) -> __m512i { _mm512_and_epi64(_mm512_xor_epi64(a, _mm512_set1_epi64(u64::MAX as i64)), b) @@ -25649,6 +27398,7 @@ pub unsafe fn _mm512_andnot_epi64(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_andnot_epi64&expand=318) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandnq))] pub unsafe fn _mm512_mask_andnot_epi64( 
src: __m512i, @@ -25665,6 +27415,7 @@ pub unsafe fn _mm512_mask_andnot_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_andnot_epi64&expand=319) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandnq))] pub unsafe fn _mm512_maskz_andnot_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m512i { let andnot = _mm512_andnot_epi64(a, b).as_i64x8(); @@ -25677,6 +27428,7 @@ pub unsafe fn _mm512_maskz_andnot_epi64(k: __mmask8, a: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_andnot_epi64&expand=315) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandnq))] pub unsafe fn _mm256_mask_andnot_epi64( src: __m256i, @@ -25694,6 +27446,7 @@ pub unsafe fn _mm256_mask_andnot_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_andnot_epi64&expand=316) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandnq))] pub unsafe fn _mm256_maskz_andnot_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m256i { let not = _mm256_xor_epi64(a, _mm256_set1_epi64x(u64::MAX as i64)); @@ -25707,6 +27460,7 @@ pub unsafe fn _mm256_maskz_andnot_epi64(k: __mmask8, a: __m256i, b: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_andnot_epi64&expand=313) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandnq))] pub unsafe fn _mm_mask_andnot_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> 
__m128i { let not = _mm_xor_epi64(a, _mm_set1_epi64x(u64::MAX as i64)); @@ -25719,6 +27473,7 @@ pub unsafe fn _mm_mask_andnot_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_andnot_epi64&expand=314) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandnq))] pub unsafe fn _mm_maskz_andnot_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let not = _mm_xor_epi64(a, _mm_set1_epi64x(u64::MAX as i64)); @@ -25732,6 +27487,7 @@ pub unsafe fn _mm_maskz_andnot_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_andnot_si512&expand=340) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpandnq))] pub unsafe fn _mm512_andnot_si512(a: __m512i, b: __m512i) -> __m512i { _mm512_and_epi64(_mm512_xor_epi64(a, _mm512_set1_epi64(u64::MAX as i64)), b) @@ -25742,6 +27498,7 @@ pub unsafe fn _mm512_andnot_si512(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=kand_mask16&expand=3212) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(and))] // generate normal and code instead of kandw pub unsafe fn _kand_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { a & b @@ -25752,6 +27509,7 @@ pub unsafe fn _kand_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_kand&expand=3210) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(and))] // generate normal and code instead of kandw pub unsafe fn _mm512_kand(a: __mmask16, b: __mmask16) -> __mmask16 { a & b @@ -25762,6 +27520,7 @@ pub unsafe fn _mm512_kand(a: __mmask16, b: __mmask16) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=kor_mask16&expand=3239) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(or))] // generate normal or code instead of korw pub unsafe fn _kor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { a | b @@ -25772,6 +27531,7 @@ pub unsafe fn _kor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_kor&expand=3237) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(or))] // generate normal or code instead of korw pub unsafe fn _mm512_kor(a: __mmask16, b: __mmask16) -> __mmask16 { a | b @@ -25782,6 +27542,7 @@ pub unsafe fn _mm512_kor(a: __mmask16, b: __mmask16) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=kxor_mask16&expand=3291) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(xor))] // generate normal xor code instead of kxorw pub unsafe fn _kxor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { a ^ b @@ -25792,6 +27553,7 @@ pub unsafe fn _kxor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_kxor&expand=3289) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(xor))] // 
generate normal xor code instead of kxorw pub unsafe fn _mm512_kxor(a: __mmask16, b: __mmask16) -> __mmask16 { a ^ b @@ -25802,6 +27564,7 @@ pub unsafe fn _mm512_kxor(a: __mmask16, b: __mmask16) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=knot_mask16&expand=3233) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _knot_mask16(a: __mmask16) -> __mmask16 { a ^ 0b11111111_11111111 } @@ -25811,6 +27574,7 @@ pub unsafe fn _knot_mask16(a: __mmask16) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_knot&expand=3231) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_knot(a: __mmask16) -> __mmask16 { a ^ 0b11111111_11111111 } @@ -25820,6 +27584,7 @@ pub unsafe fn _mm512_knot(a: __mmask16) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=kandn_mask16&expand=3218) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(not))] // generate normal and, not code instead of kandnw pub unsafe fn _kandn_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { _mm512_kand(_mm512_knot(a), b) @@ -25830,6 +27595,7 @@ pub unsafe fn _kandn_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_kandn&expand=3216) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(not))] // generate normal and code instead of kandw pub unsafe fn _mm512_kandn(a: __mmask16, b: __mmask16) -> __mmask16 { _mm512_kand(_mm512_knot(a), b) @@ -25840,6 +27606,7 @@ pub unsafe 
fn _mm512_kandn(a: __mmask16, b: __mmask16) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=kxnor_mask16&expand=3285) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(xor))] // generate normal xor, not code instead of kxnorw pub unsafe fn _kxnor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { _mm512_knot(_mm512_kxor(a, b)) @@ -25850,6 +27617,7 @@ pub unsafe fn _kxnor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_kxnor&expand=3283) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(xor))] // generate normal and code instead of kandw pub unsafe fn _mm512_kxnor(a: __mmask16, b: __mmask16) -> __mmask16 { _mm512_knot(_mm512_kxor(a, b)) @@ -25860,6 +27628,7 @@ pub unsafe fn _mm512_kxnor(a: __mmask16, b: __mmask16) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm512_kmov&expand=3228) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(mov))] // generate normal and code instead of kmovw pub unsafe fn _mm512_kmov(a: __mmask16) -> __mmask16 { a @@ -25870,6 +27639,7 @@ pub unsafe fn _mm512_kmov(a: __mmask16) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_int2mask&expand=3189) #[inline] #[target_feature(enable = "avx512f")] // generate normal and code instead of kmovw +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_int2mask(mask: i32) -> __mmask16 { mask as u16 } @@ -25879,6 +27649,7 @@ pub unsafe fn _mm512_int2mask(mask: i32) -> __mmask16 
{ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_mask2int&expand=3544) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(mov))] // generate normal and code instead of kmovw pub unsafe fn _mm512_mask2int(k1: __mmask16) -> i32 { k1 as i32 @@ -25889,6 +27660,7 @@ pub unsafe fn _mm512_mask2int(k1: __mmask16) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_kunpackb&expand=3280) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(mov))] // generate normal and code instead of kunpckbw pub unsafe fn _mm512_kunpackb(a: __mmask16, b: __mmask16) -> __mmask16 { let a = a & 0b00000000_11111111; @@ -25901,6 +27673,7 @@ pub unsafe fn _mm512_kunpackb(a: __mmask16, b: __mmask16) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_kortestc&expand=3247) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(cmp))] // generate normal and code instead of kortestw pub unsafe fn _mm512_kortestc(a: __mmask16, b: __mmask16) -> i32 { let r = a | b; @@ -25916,6 +27689,7 @@ pub unsafe fn _mm512_kortestc(a: __mmask16, b: __mmask16) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_test_epi32_mask&expand=5890) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmd))] pub unsafe fn _mm512_test_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { let and = _mm512_and_epi32(a, b); @@ -25928,6 +27702,7 @@ pub unsafe fn _mm512_test_epi32_mask(a: __m512i, b: __m512i) -> 
__mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_test_epi32_mask&expand=5889) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmd))] pub unsafe fn _mm512_mask_test_epi32_mask(k: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { let and = _mm512_and_epi32(a, b); @@ -25940,6 +27715,7 @@ pub unsafe fn _mm512_mask_test_epi32_mask(k: __mmask16, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_test_epi32_mask&expand=5888) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmd))] pub unsafe fn _mm256_test_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { let and = _mm256_and_si256(a, b); @@ -25952,6 +27728,7 @@ pub unsafe fn _mm256_test_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_test_epi32_mask&expand=5887) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmd))] pub unsafe fn _mm256_mask_test_epi32_mask(k: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { let and = _mm256_and_si256(a, b); @@ -25964,6 +27741,7 @@ pub unsafe fn _mm256_mask_test_epi32_mask(k: __mmask8, a: __m256i, b: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_epi32_mask&expand=5886) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmd))] pub unsafe fn _mm_test_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, 
b); @@ -25976,6 +27754,7 @@ pub unsafe fn _mm_test_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_test_epi32_mask&expand=5885) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmd))] pub unsafe fn _mm_mask_test_epi32_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); @@ -25988,6 +27767,7 @@ pub unsafe fn _mm_mask_test_epi32_mask(k: __mmask8, a: __m128i, b: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_test_epi64_mask&expand=5896) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmq))] pub unsafe fn _mm512_test_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { let and = _mm512_and_epi64(a, b); @@ -26000,6 +27780,7 @@ pub unsafe fn _mm512_test_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_test_epi64_mask&expand=5895) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmq))] pub unsafe fn _mm512_mask_test_epi64_mask(k: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { let and = _mm512_and_epi64(a, b); @@ -26012,6 +27793,7 @@ pub unsafe fn _mm512_mask_test_epi64_mask(k: __mmask8, a: __m512i, b: __m512i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_test_epi64_mask&expand=5894) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmq))] pub unsafe fn 
_mm256_test_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { let and = _mm256_and_si256(a, b); @@ -26024,6 +27806,7 @@ pub unsafe fn _mm256_test_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_test_epi64_mask&expand=5893) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmq))] pub unsafe fn _mm256_mask_test_epi64_mask(k: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { let and = _mm256_and_si256(a, b); @@ -26036,6 +27819,7 @@ pub unsafe fn _mm256_mask_test_epi64_mask(k: __mmask8, a: __m256i, b: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_epi64_mask&expand=5892) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmq))] pub unsafe fn _mm_test_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); @@ -26048,6 +27832,7 @@ pub unsafe fn _mm_test_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_test_epi64_mask&expand=5891) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestmq))] pub unsafe fn _mm_mask_test_epi64_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); @@ -26060,6 +27845,7 @@ pub unsafe fn _mm_mask_test_epi64_mask(k: __mmask8, a: __m128i, b: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_testn_epi32_mask&expand=5921) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmd))] pub unsafe fn _mm512_testn_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { let and = _mm512_and_epi32(a, b); @@ -26072,6 +27858,7 @@ pub unsafe fn _mm512_testn_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_testn_epi32_mask&expand=5920) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmd))] pub unsafe fn _mm512_mask_testn_epi32_mask(k: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { let and = _mm512_and_epi32(a, b); @@ -26084,6 +27871,7 @@ pub unsafe fn _mm512_mask_testn_epi32_mask(k: __mmask16, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testn_epi32_mask&expand=5919) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmd))] pub unsafe fn _mm256_testn_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { let and = _mm256_and_si256(a, b); @@ -26096,6 +27884,7 @@ pub unsafe fn _mm256_testn_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_testn_epi32_mask&expand=5918) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmd))] pub unsafe fn _mm256_mask_testn_epi32_mask(k: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { let and = _mm256_and_si256(a, b); @@ -26108,6 +27897,7 @@ pub unsafe fn _mm256_mask_testn_epi32_mask(k: __mmask8, a: __m256i, b: __m256i) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testn_epi32_mask&expand=5917) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmd))] pub unsafe fn _mm_testn_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); @@ -26120,6 +27910,7 @@ pub unsafe fn _mm_testn_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_testn_epi32_mask&expand=5916) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmd))] pub unsafe fn _mm_mask_testn_epi32_mask(k: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); @@ -26132,6 +27923,7 @@ pub unsafe fn _mm_mask_testn_epi32_mask(k: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_testn_epi64_mask&expand=5927) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmq))] pub unsafe fn _mm512_testn_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { let and = _mm512_and_epi64(a, b); @@ -26144,6 +27936,7 @@ pub unsafe fn _mm512_testn_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_testn_epi64_mask&expand=5926) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmq))] pub unsafe fn _mm512_mask_testn_epi64_mask(k: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { let and = _mm512_and_epi64(a, b); @@ -26156,6 +27949,7 @@ pub unsafe fn 
_mm512_mask_testn_epi64_mask(k: __mmask8, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testn_epi64_mask&expand=5925) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmq))] pub unsafe fn _mm256_testn_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { let and = _mm256_and_si256(a, b); @@ -26168,6 +27962,7 @@ pub unsafe fn _mm256_testn_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_testn_epi64_mask&expand=5924) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmq))] pub unsafe fn _mm256_mask_testn_epi64_mask(k: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { let and = _mm256_and_si256(a, b); @@ -26180,6 +27975,7 @@ pub unsafe fn _mm256_mask_testn_epi64_mask(k: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testn_epi64_mask&expand=5923) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmq))] pub unsafe fn _mm_testn_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); @@ -26192,6 +27988,7 @@ pub unsafe fn _mm_testn_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_testn_epi64_mask&expand=5922) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vptestnmq))] pub unsafe fn _mm_mask_testn_epi64_mask(k: __mmask8, a: 
__m128i, b: __m128i) -> __mmask8 { let and = _mm_and_si128(a, b); @@ -26204,6 +28001,7 @@ pub unsafe fn _mm_mask_testn_epi64_mask(k: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_stream_ps&expand=5671) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovntps))] #[allow(clippy::cast_ptr_alignment)] pub unsafe fn _mm512_stream_ps(mem_addr: *mut f32, a: __m512) { @@ -26215,6 +28013,7 @@ pub unsafe fn _mm512_stream_ps(mem_addr: *mut f32, a: __m512) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_stream_pd&expand=5667) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovntps))] //should be vmovntpd #[allow(clippy::cast_ptr_alignment)] pub unsafe fn _mm512_stream_pd(mem_addr: *mut f64, a: __m512d) { @@ -26226,6 +28025,7 @@ pub unsafe fn _mm512_stream_pd(mem_addr: *mut f64, a: __m512d) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_stream_si512&expand=5675) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovntps))] //should be vmovntdq #[allow(clippy::cast_ptr_alignment)] pub unsafe fn _mm512_stream_si512(mem_addr: *mut i64, a: __m512i) { @@ -26237,6 +28037,7 @@ pub unsafe fn _mm512_stream_si512(mem_addr: *mut i64, a: __m512i) { /// [Intel's documentation]( https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set_ps&expand=4931) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_set_ps( e0: f32, e1: f32, @@ -26266,6 +28067,7 @@ pub unsafe fn 
_mm512_set_ps( /// [Intel's documentation]( https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setr_ps&expand=5008) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_setr_ps( e0: f32, e1: f32, @@ -26295,6 +28097,7 @@ pub unsafe fn _mm512_setr_ps( /// [Intel's documentation]( https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set1_pd&expand=4975) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_set1_pd(a: f64) -> __m512d { transmute(f64x8::splat(a)) } @@ -26304,6 +28107,7 @@ pub unsafe fn _mm512_set1_pd(a: f64) -> __m512d { /// [Intel's documentation]( https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set1_ps&expand=4981) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_set1_ps(a: f32) -> __m512 { transmute(f32x16::splat(a)) } @@ -26313,6 +28117,7 @@ pub unsafe fn _mm512_set1_ps(a: f32) -> __m512 { /// [Intel's documentation]( https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set_epi32&expand=4908) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_set_epi32( e15: i32, e14: i32, @@ -26341,6 +28146,7 @@ pub unsafe fn _mm512_set_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set1_epi8&expand=4972) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_set1_epi8(a: i8) -> __m512i { transmute(i8x64::splat(a)) } @@ -26350,6 +28156,7 @@ pub unsafe fn _mm512_set1_epi8(a: i8) -> __m512i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set1_epi16&expand=4944) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_set1_epi16(a: i16) -> __m512i { transmute(i16x32::splat(a)) } @@ -26357,6 +28164,7 @@ pub unsafe fn _mm512_set1_epi16(a: i16) -> __m512i { /// Broadcast 32-bit integer `a` to all elements of `dst`. #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_set1_epi32(a: i32) -> __m512i { transmute(i32x16::splat(a)) } @@ -26366,6 +28174,7 @@ pub unsafe fn _mm512_set1_epi32(a: i32) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_set1_epi32&expand=4951) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastd))] pub unsafe fn _mm512_mask_set1_epi32(src: __m512i, k: __mmask16, a: i32) -> __m512i { let r = _mm512_set1_epi32(a).as_i32x16(); @@ -26377,6 +28186,7 @@ pub unsafe fn _mm512_mask_set1_epi32(src: __m512i, k: __mmask16, a: i32) -> __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_set1_epi32&expand=4952) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastd))] pub unsafe fn _mm512_maskz_set1_epi32(k: __mmask16, a: i32) -> __m512i { let r = _mm512_set1_epi32(a).as_i32x16(); @@ -26389,6 +28199,7 @@ pub unsafe fn _mm512_maskz_set1_epi32(k: __mmask16, a: i32) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_set1_epi32&expand=4948) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastd))] pub unsafe fn _mm256_mask_set1_epi32(src: __m256i, k: __mmask8, a: i32) -> __m256i { let r = _mm256_set1_epi32(a).as_i32x8(); @@ -26400,6 +28211,7 @@ pub unsafe fn _mm256_mask_set1_epi32(src: __m256i, k: __mmask8, a: i32) -> __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_set1_epi32&expand=4949) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastd))] pub unsafe fn _mm256_maskz_set1_epi32(k: __mmask8, a: i32) -> __m256i { let r = _mm256_set1_epi32(a).as_i32x8(); @@ -26412,6 +28224,7 @@ pub unsafe fn _mm256_maskz_set1_epi32(k: __mmask8, a: i32) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_set1_epi32&expand=4945) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastd))] pub unsafe fn _mm_mask_set1_epi32(src: __m128i, k: __mmask8, a: i32) -> __m128i { let r = _mm_set1_epi32(a).as_i32x4(); @@ -26423,6 +28236,7 @@ pub unsafe fn _mm_mask_set1_epi32(src: __m128i, k: __mmask8, a: i32) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_set1_epi32&expand=4946) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastd))] pub unsafe fn _mm_maskz_set1_epi32(k: __mmask8, a: i32) -> __m128i { let r = _mm_set1_epi32(a).as_i32x4(); @@ -26435,6 +28249,7 @@ pub unsafe fn _mm_maskz_set1_epi32(k: __mmask8, a: i32) -> __m128i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set1_epi64&expand=4961) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_set1_epi64(a: i64) -> __m512i { transmute(i64x8::splat(a)) } @@ -26444,6 +28259,7 @@ pub unsafe fn _mm512_set1_epi64(a: i64) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_set1_epi64&expand=4959) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastq))] pub unsafe fn _mm512_mask_set1_epi64(src: __m512i, k: __mmask8, a: i64) -> __m512i { let r = _mm512_set1_epi64(a).as_i64x8(); @@ -26455,6 +28271,7 @@ pub unsafe fn _mm512_mask_set1_epi64(src: __m512i, k: __mmask8, a: i64) -> __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_set1_epi64&expand=4960) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastq))] pub unsafe fn _mm512_maskz_set1_epi64(k: __mmask8, a: i64) -> __m512i { let r = _mm512_set1_epi64(a).as_i64x8(); @@ -26467,6 +28284,7 @@ pub unsafe fn _mm512_maskz_set1_epi64(k: __mmask8, a: i64) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_set1_epi64&expand=4957) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastq))] pub unsafe fn _mm256_mask_set1_epi64(src: __m256i, k: __mmask8, a: i64) -> __m256i { let r = _mm256_set1_epi64x(a).as_i64x4(); @@ -26478,6 +28296,7 @@ pub unsafe fn _mm256_mask_set1_epi64(src: __m256i, k: __mmask8, a: i64) -> __m25 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_set1_epi64&expand=4958) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastq))] pub unsafe fn _mm256_maskz_set1_epi64(k: __mmask8, a: i64) -> __m256i { let r = _mm256_set1_epi64x(a).as_i64x4(); @@ -26490,6 +28309,7 @@ pub unsafe fn _mm256_maskz_set1_epi64(k: __mmask8, a: i64) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_set1_epi64&expand=4954) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastq))] pub unsafe fn _mm_mask_set1_epi64(src: __m128i, k: __mmask8, a: i64) -> __m128i { let r = _mm_set1_epi64x(a).as_i64x2(); @@ -26501,6 +28321,7 @@ pub unsafe fn _mm_mask_set1_epi64(src: __m128i, k: __mmask8, a: i64) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_set1_epi64&expand=4955) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpbroadcastq))] pub unsafe fn _mm_maskz_set1_epi64(k: __mmask8, a: i64) -> __m128i { let r = _mm_set1_epi64x(a).as_i64x2(); @@ -26513,6 +28334,7 @@ pub unsafe fn _mm_maskz_set1_epi64(k: __mmask8, a: i64) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set4_epi64&expand=4983) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_set4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i { let r = i64x8::new(d, c, b, a, d, c, b, a); transmute(r) @@ -26523,6 +28345,7 @@ pub unsafe fn _mm512_set4_epi64(d: i64, c: i64, b: 
i64, a: i64) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setr4_epi64&expand=5010) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_setr4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i { let r = i64x8::new(a, b, c, d, a, b, c, d); transmute(r) @@ -26533,6 +28356,7 @@ pub unsafe fn _mm512_setr4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_ps_mask&expand=1074) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps pub unsafe fn _mm512_cmplt_ps_mask(a: __m512, b: __m512) -> __mmask16 { _mm512_cmp_ps_mask::<_CMP_LT_OS>(a, b) @@ -26543,6 +28367,7 @@ pub unsafe fn _mm512_cmplt_ps_mask(a: __m512, b: __m512) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_ps_mask&expand=1075) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps pub unsafe fn _mm512_mask_cmplt_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> __mmask16 { _mm512_mask_cmp_ps_mask::<_CMP_LT_OS>(k1, a, b) @@ -26553,6 +28378,7 @@ pub unsafe fn _mm512_mask_cmplt_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpnlt_ps_mask&expand=1154) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps pub unsafe fn _mm512_cmpnlt_ps_mask(a: __m512, b: __m512) -> __mmask16 { 
_mm512_cmp_ps_mask::<_CMP_NLT_US>(a, b) @@ -26563,6 +28389,7 @@ pub unsafe fn _mm512_cmpnlt_ps_mask(a: __m512, b: __m512) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpnlt_ps_mask&expand=1155) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps pub unsafe fn _mm512_mask_cmpnlt_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> __mmask16 { _mm512_mask_cmp_ps_mask::<_CMP_NLT_US>(k1, a, b) @@ -26573,6 +28400,7 @@ pub unsafe fn _mm512_mask_cmpnlt_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_ps_mask&expand=1013) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps pub unsafe fn _mm512_cmple_ps_mask(a: __m512, b: __m512) -> __mmask16 { _mm512_cmp_ps_mask::<_CMP_LE_OS>(a, b) @@ -26583,6 +28411,7 @@ pub unsafe fn _mm512_cmple_ps_mask(a: __m512, b: __m512) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_ps_mask&expand=1014) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps pub unsafe fn _mm512_mask_cmple_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> __mmask16 { _mm512_mask_cmp_ps_mask::<_CMP_LE_OS>(k1, a, b) @@ -26593,6 +28422,7 @@ pub unsafe fn _mm512_mask_cmple_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpnle_ps_mask&expand=1146) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps pub unsafe fn _mm512_cmpnle_ps_mask(a: __m512, b: __m512) -> __mmask16 { _mm512_cmp_ps_mask::<_CMP_NLE_US>(a, b) @@ -26603,6 +28433,7 @@ pub unsafe fn _mm512_cmpnle_ps_mask(a: __m512, b: __m512) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpnle_ps_mask&expand=1147) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps pub unsafe fn _mm512_mask_cmpnle_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> __mmask16 { _mm512_mask_cmp_ps_mask::<_CMP_NLE_US>(k1, a, b) @@ -26613,6 +28444,7 @@ pub unsafe fn _mm512_mask_cmpnle_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_ps_mask&expand=828) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps pub unsafe fn _mm512_cmpeq_ps_mask(a: __m512, b: __m512) -> __mmask16 { _mm512_cmp_ps_mask::<_CMP_EQ_OQ>(a, b) @@ -26623,6 +28455,7 @@ pub unsafe fn _mm512_cmpeq_ps_mask(a: __m512, b: __m512) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_ps_mask&expand=829) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps pub unsafe fn _mm512_mask_cmpeq_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> __mmask16 { _mm512_mask_cmp_ps_mask::<_CMP_EQ_OQ>(k1, a, b) @@ -26633,6 +28466,7 @@ pub unsafe fn _mm512_mask_cmpeq_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_ps_mask&expand=1130) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps pub unsafe fn _mm512_cmpneq_ps_mask(a: __m512, b: __m512) -> __mmask16 { _mm512_cmp_ps_mask::<_CMP_NEQ_UQ>(a, b) @@ -26643,6 +28477,7 @@ pub unsafe fn _mm512_cmpneq_ps_mask(a: __m512, b: __m512) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_ps_mask&expand=1131) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps pub unsafe fn _mm512_mask_cmpneq_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> __mmask16 { _mm512_mask_cmp_ps_mask::<_CMP_NEQ_UQ>(k1, a, b) @@ -26653,6 +28488,7 @@ pub unsafe fn _mm512_mask_cmpneq_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_ps_mask&expand=749) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm512_cmp_ps_mask(a: __m512, b: __m512) -> __mmask16 { @@ -26669,6 +28505,7 @@ pub unsafe fn _mm512_cmp_ps_mask(a: __m512, b: __m512) -> __mma /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_ps_mask&expand=750) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm512_mask_cmp_ps_mask( @@ -26688,6 +28525,7 @@ pub unsafe fn _mm512_mask_cmp_ps_mask( /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_ps_mask&expand=747) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm256_cmp_ps_mask(a: __m256, b: __m256) -> __mmask8 { @@ -26704,6 +28542,7 @@ pub unsafe fn _mm256_cmp_ps_mask(a: __m256, b: __m256) -> __mma /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_ps_mask&expand=748) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm256_mask_cmp_ps_mask( @@ -26723,6 +28562,7 @@ pub unsafe fn _mm256_mask_cmp_ps_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_ps_mask&expand=745) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm_cmp_ps_mask(a: __m128, b: __m128) -> __mmask8 { @@ -26739,6 +28579,7 @@ pub unsafe fn _mm_cmp_ps_mask(a: __m128, b: __m128) -> __mmask8 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_ps_mask&expand=746) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm_mask_cmp_ps_mask( @@ -26759,6 +28600,7 @@ pub unsafe fn _mm_mask_cmp_ps_mask( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_round_ps_mask&expand=753) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))] #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm512_cmp_round_ps_mask( @@ -26780,6 +28622,7 @@ pub unsafe fn _mm512_cmp_round_ps_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_round_ps_mask&expand=754) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm512_mask_cmp_round_ps_mask( @@ -26800,6 +28643,7 @@ pub unsafe fn _mm512_mask_cmp_round_ps_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpord_ps_mask&expand=1162) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmps pub unsafe fn _mm512_cmpord_ps_mask(a: __m512, b: __m512) -> __mmask16 { _mm512_cmp_ps_mask::<_CMP_ORD_Q>(a, b) @@ -26810,6 +28654,7 @@ pub unsafe fn _mm512_cmpord_ps_mask(a: __m512, b: __m512) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpord_ps_mask&expand=1163) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps pub unsafe fn _mm512_mask_cmpord_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> __mmask16 { _mm512_mask_cmp_ps_mask::<_CMP_ORD_Q>(k1, a, b) @@ -26820,6 +28665,7 @@ pub unsafe fn _mm512_mask_cmpord_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpunord_ps_mask&expand=1170) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps pub unsafe fn _mm512_cmpunord_ps_mask(a: __m512, b: __m512) -> __mmask16 { _mm512_cmp_ps_mask::<_CMP_UNORD_Q>(a, b) @@ -26830,6 +28676,7 @@ pub unsafe fn _mm512_cmpunord_ps_mask(a: __m512, b: __m512) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpunord_ps_mask&expand=1171) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps pub unsafe fn _mm512_mask_cmpunord_ps_mask(k1: __mmask16, a: __m512, b: __m512) -> __mmask16 { _mm512_mask_cmp_ps_mask::<_CMP_UNORD_Q>(k1, a, b) @@ -26840,6 +28687,7 @@ pub unsafe fn _mm512_mask_cmpunord_ps_mask(k1: __mmask16, a: __m512, b: __m512) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_pd_mask&expand=1071) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd pub unsafe fn _mm512_cmplt_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { _mm512_cmp_pd_mask::<_CMP_LT_OS>(a, b) @@ -26850,6 +28698,7 @@ pub unsafe fn _mm512_cmplt_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_pd_mask&expand=1072) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd pub unsafe fn _mm512_mask_cmplt_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) -> __mmask8 { 
_mm512_mask_cmp_pd_mask::<_CMP_LT_OS>(k1, a, b) @@ -26860,6 +28709,7 @@ pub unsafe fn _mm512_mask_cmplt_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpnlt_pd_mask&expand=1151) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd pub unsafe fn _mm512_cmpnlt_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { _mm512_cmp_pd_mask::<_CMP_NLT_US>(a, b) @@ -26870,6 +28720,7 @@ pub unsafe fn _mm512_cmpnlt_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpnlt_pd_mask&expand=1152) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd pub unsafe fn _mm512_mask_cmpnlt_pd_mask(m: __mmask8, a: __m512d, b: __m512d) -> __mmask8 { _mm512_mask_cmp_pd_mask::<_CMP_NLT_US>(m, a, b) @@ -26880,6 +28731,7 @@ pub unsafe fn _mm512_mask_cmpnlt_pd_mask(m: __mmask8, a: __m512d, b: __m512d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_pd_mask&expand=1010) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd pub unsafe fn _mm512_cmple_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { _mm512_cmp_pd_mask::<_CMP_LE_OS>(a, b) @@ -26890,6 +28742,7 @@ pub unsafe fn _mm512_cmple_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_pd_mask&expand=1011) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = 
"111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd pub unsafe fn _mm512_mask_cmple_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) -> __mmask8 { _mm512_mask_cmp_pd_mask::<_CMP_LE_OS>(k1, a, b) @@ -26900,6 +28753,7 @@ pub unsafe fn _mm512_mask_cmple_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpnle_pd_mask&expand=1143) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd pub unsafe fn _mm512_cmpnle_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { _mm512_cmp_pd_mask::<_CMP_NLE_US>(a, b) @@ -26910,6 +28764,7 @@ pub unsafe fn _mm512_cmpnle_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpnle_pd_mask&expand=1144) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd pub unsafe fn _mm512_mask_cmpnle_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) -> __mmask8 { _mm512_mask_cmp_pd_mask::<_CMP_NLE_US>(k1, a, b) @@ -26920,6 +28775,7 @@ pub unsafe fn _mm512_mask_cmpnle_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_pd_mask&expand=822) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd pub unsafe fn _mm512_cmpeq_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { _mm512_cmp_pd_mask::<_CMP_EQ_OQ>(a, b) @@ -26930,6 +28786,7 @@ pub unsafe fn _mm512_cmpeq_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_pd_mask&expand=823) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd pub unsafe fn _mm512_mask_cmpeq_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) -> __mmask8 { _mm512_mask_cmp_pd_mask::<_CMP_EQ_OQ>(k1, a, b) @@ -26940,6 +28797,7 @@ pub unsafe fn _mm512_mask_cmpeq_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_pd_mask&expand=1127) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd pub unsafe fn _mm512_cmpneq_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { _mm512_cmp_pd_mask::<_CMP_NEQ_UQ>(a, b) @@ -26950,6 +28808,7 @@ pub unsafe fn _mm512_cmpneq_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_pd_mask&expand=1128) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd pub unsafe fn _mm512_mask_cmpneq_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) -> __mmask8 { _mm512_mask_cmp_pd_mask::<_CMP_NEQ_UQ>(k1, a, b) @@ -26960,6 +28819,7 @@ pub unsafe fn _mm512_mask_cmpneq_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_pd_mask&expand=741) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm512_cmp_pd_mask(a: 
__m512d, b: __m512d) -> __mmask8 { @@ -26976,6 +28836,7 @@ pub unsafe fn _mm512_cmp_pd_mask(a: __m512d, b: __m512d) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_pd_mask&expand=742) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm512_mask_cmp_pd_mask( @@ -26995,6 +28856,7 @@ pub unsafe fn _mm512_mask_cmp_pd_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_pd_mask&expand=739) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm256_cmp_pd_mask(a: __m256d, b: __m256d) -> __mmask8 { @@ -27011,6 +28873,7 @@ pub unsafe fn _mm256_cmp_pd_mask(a: __m256d, b: __m256d) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_pd_mask&expand=740) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm256_mask_cmp_pd_mask( @@ -27030,6 +28893,7 @@ pub unsafe fn _mm256_mask_cmp_pd_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_pd_mask&expand=737) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm_cmp_pd_mask(a: __m128d, b: __m128d) -> __mmask8 { @@ -27046,6 +28910,7 @@ pub unsafe fn _mm_cmp_pd_mask(a: __m128d, b: __m128d) -> __mmas /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_pd_mask&expand=738) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm_mask_cmp_pd_mask( @@ -27066,6 +28931,7 @@ pub unsafe fn _mm_mask_cmp_pd_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_round_pd_mask&expand=751) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))] #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm512_cmp_round_pd_mask( @@ -27087,6 +28953,7 @@ pub unsafe fn _mm512_cmp_round_pd_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_round_pd_mask&expand=752) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm512_mask_cmp_round_pd_mask( @@ -27107,6 +28974,7 @@ pub unsafe fn _mm512_mask_cmp_round_pd_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpord_pd_mask&expand=1159) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd pub unsafe fn _mm512_cmpord_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { _mm512_cmp_pd_mask::<_CMP_ORD_Q>(a, b) @@ -27117,6 +28985,7 @@ pub unsafe fn _mm512_cmpord_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpord_pd_mask&expand=1160) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd pub unsafe fn _mm512_mask_cmpord_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) -> __mmask8 { _mm512_mask_cmp_pd_mask::<_CMP_ORD_Q>(k1, a, b) @@ -27127,6 +28996,7 @@ pub unsafe fn _mm512_mask_cmpord_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpunord_pd_mask&expand=1167) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd pub unsafe fn _mm512_cmpunord_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { _mm512_cmp_pd_mask::<_CMP_UNORD_Q>(a, b) @@ -27137,6 +29007,7 @@ pub unsafe fn _mm512_cmpunord_pd_mask(a: __m512d, b: __m512d) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpunord_pd_mask&expand=1168) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd pub unsafe fn _mm512_mask_cmpunord_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) -> __mmask8 { _mm512_mask_cmp_pd_mask::<_CMP_UNORD_Q>(k1, a, b) @@ -27147,6 +29018,7 @@ pub unsafe fn _mm512_mask_cmpunord_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_ss_mask&expand=763) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn 
_mm_cmp_ss_mask(a: __m128, b: __m128) -> __mmask8 { @@ -27161,6 +29033,7 @@ pub unsafe fn _mm_cmp_ss_mask(a: __m128, b: __m128) -> __mmask8 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_ss_mask&expand=764) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm_mask_cmp_ss_mask( @@ -27179,6 +29052,7 @@ pub unsafe fn _mm_mask_cmp_ss_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_round_ss_mask&expand=757) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))] #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm_cmp_round_ss_mask( @@ -27198,6 +29072,7 @@ pub unsafe fn _mm_cmp_round_ss_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_round_ss_mask&expand=758) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm_mask_cmp_round_ss_mask( @@ -27216,6 +29091,7 @@ pub unsafe fn _mm_mask_cmp_round_ss_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_sd_mask&expand=760) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm_cmp_sd_mask(a: __m128d, b: __m128d) -> __mmask8 { @@ -27230,6 +29106,7 @@ pub unsafe fn _mm_cmp_sd_mask(a: __m128d, b: __m128d) -> __mmas /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_sd_mask&expand=761) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))] pub unsafe fn _mm_mask_cmp_sd_mask( @@ -27248,6 +29125,7 @@ pub unsafe fn _mm_mask_cmp_sd_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_round_sd_mask&expand=755) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))] #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm_cmp_round_sd_mask( @@ -27267,6 +29145,7 @@ pub unsafe fn _mm_cmp_round_sd_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_round_sd_mask&expand=756) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm_mask_cmp_round_sd_mask( @@ -27285,6 +29164,7 @@ pub unsafe fn _mm_mask_cmp_round_sd_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_epu32_mask&expand=1056) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm512_cmplt_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { simd_bitmask::(simd_lt(a.as_u32x16(), b.as_u32x16())) @@ -27295,6 +29175,7 @@ pub unsafe fn _mm512_cmplt_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_epu32_mask&expand=1057) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm512_mask_cmplt_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_cmplt_epu32_mask(a, b) & k1 @@ -27305,6 +29186,7 @@ pub unsafe fn _mm512_mask_cmplt_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmplt_epu32_mask&expand=1054) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm256_cmplt_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::(simd_lt(a.as_u32x8(), b.as_u32x8())) @@ -27315,6 +29197,7 @@ pub unsafe fn _mm256_cmplt_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmplt_epu32_mask&expand=1055) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm256_mask_cmplt_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmplt_epu32_mask(a, b) & k1 @@ -27325,6 +29208,7 @@ pub unsafe fn _mm256_mask_cmplt_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epu32_mask&expand=1052) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn 
_mm_cmplt_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_lt(a.as_u32x4(), b.as_u32x4())) @@ -27335,6 +29219,7 @@ pub unsafe fn _mm_cmplt_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmplt_epu32_mask&expand=1053) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm_mask_cmplt_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmplt_epu32_mask(a, b) & k1 @@ -27345,6 +29230,7 @@ pub unsafe fn _mm_mask_cmplt_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpgt_epu32_mask&expand=933) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm512_cmpgt_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { simd_bitmask::(simd_gt(a.as_u32x16(), b.as_u32x16())) @@ -27355,6 +29241,7 @@ pub unsafe fn _mm512_cmpgt_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpgt_epu32_mask&expand=934) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm512_mask_cmpgt_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_cmpgt_epu32_mask(a, b) & k1 @@ -27365,6 +29252,7 @@ pub unsafe fn _mm512_mask_cmpgt_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epu32_mask&expand=931) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm256_cmpgt_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::(simd_gt(a.as_u32x8(), b.as_u32x8())) @@ -27375,6 +29263,7 @@ pub unsafe fn _mm256_cmpgt_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpgt_epu32_mask&expand=932) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm256_mask_cmpgt_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmpgt_epu32_mask(a, b) & k1 @@ -27385,6 +29274,7 @@ pub unsafe fn _mm256_mask_cmpgt_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epu32_mask&expand=929) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm_cmpgt_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_gt(a.as_u32x4(), b.as_u32x4())) @@ -27395,6 +29285,7 @@ pub unsafe fn _mm_cmpgt_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpgt_epu32_mask&expand=930) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn 
_mm_mask_cmpgt_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpgt_epu32_mask(a, b) & k1 @@ -27405,6 +29296,7 @@ pub unsafe fn _mm_mask_cmpgt_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_epu32_mask&expand=995) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm512_cmple_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { simd_bitmask::(simd_le(a.as_u32x16(), b.as_u32x16())) @@ -27415,6 +29307,7 @@ pub unsafe fn _mm512_cmple_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_epu32_mask&expand=996) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm512_mask_cmple_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_cmple_epu32_mask(a, b) & k1 @@ -27425,6 +29318,7 @@ pub unsafe fn _mm512_mask_cmple_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmple_epu32_mask&expand=993) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm256_cmple_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::(simd_le(a.as_u32x8(), b.as_u32x8())) @@ -27435,6 +29329,7 @@ pub unsafe fn _mm256_cmple_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmple_epu32_mask&expand=994) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm256_mask_cmple_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmple_epu32_mask(a, b) & k1 @@ -27445,6 +29340,7 @@ pub unsafe fn _mm256_mask_cmple_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_epu32_mask&expand=991) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm_cmple_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_le(a.as_u32x4(), b.as_u32x4())) @@ -27455,6 +29351,7 @@ pub unsafe fn _mm_cmple_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmple_epu32_mask&expand=992) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm_mask_cmple_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmple_epu32_mask(a, b) & k1 @@ -27465,6 +29362,7 @@ pub unsafe fn _mm_mask_cmple_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpge_epu32_mask&expand=873) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm512_cmpge_epu32_mask(a: 
__m512i, b: __m512i) -> __mmask16 { simd_bitmask::(simd_ge(a.as_u32x16(), b.as_u32x16())) @@ -27475,6 +29373,7 @@ pub unsafe fn _mm512_cmpge_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpge_epu32_mask&expand=874) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm512_mask_cmpge_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_cmpge_epu32_mask(a, b) & k1 @@ -27485,6 +29384,7 @@ pub unsafe fn _mm512_mask_cmpge_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpge_epu32_mask&expand=871) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm256_cmpge_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::(simd_ge(a.as_u32x8(), b.as_u32x8())) @@ -27495,6 +29395,7 @@ pub unsafe fn _mm256_cmpge_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpge_epu32_mask&expand=872) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm256_mask_cmpge_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmpge_epu32_mask(a, b) & k1 @@ -27505,6 +29406,7 @@ pub unsafe fn _mm256_mask_cmpge_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_epu32_mask&expand=869) 
#[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm_cmpge_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_ge(a.as_u32x4(), b.as_u32x4())) @@ -27515,6 +29417,7 @@ pub unsafe fn _mm_cmpge_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpge_epu32_mask&expand=870) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm_mask_cmpge_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpge_epu32_mask(a, b) & k1 @@ -27525,6 +29428,7 @@ pub unsafe fn _mm_mask_cmpge_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_epu32_mask&expand=807) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm512_cmpeq_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { simd_bitmask::(simd_eq(a.as_u32x16(), b.as_u32x16())) @@ -27535,6 +29439,7 @@ pub unsafe fn _mm512_cmpeq_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_epu32_mask&expand=808) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm512_mask_cmpeq_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_cmpeq_epu32_mask(a, b) & k1 @@ -27545,6 +29450,7 @@ pub unsafe fn 
_mm512_mask_cmpeq_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epu32_mask&expand=805) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm256_cmpeq_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::(simd_eq(a.as_u32x8(), b.as_u32x8())) @@ -27555,6 +29461,7 @@ pub unsafe fn _mm256_cmpeq_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpeq_epu32_mask&expand=806) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm256_mask_cmpeq_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmpeq_epu32_mask(a, b) & k1 @@ -27565,6 +29472,7 @@ pub unsafe fn _mm256_mask_cmpeq_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epu32_mask&expand=803) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm_cmpeq_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_eq(a.as_u32x4(), b.as_u32x4())) @@ -27575,6 +29483,7 @@ pub unsafe fn _mm_cmpeq_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpeq_epu32_mask&expand=804) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm_mask_cmpeq_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpeq_epu32_mask(a, b) & k1 @@ -27585,6 +29494,7 @@ pub unsafe fn _mm_mask_cmpeq_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_epu32_mask&expand=1112) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm512_cmpneq_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { simd_bitmask::(simd_ne(a.as_u32x16(), b.as_u32x16())) @@ -27595,6 +29505,7 @@ pub unsafe fn _mm512_cmpneq_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_epu32_mask&expand=1113) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm512_mask_cmpneq_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_cmpneq_epu32_mask(a, b) & k1 @@ -27605,6 +29516,7 @@ pub unsafe fn _mm512_mask_cmpneq_epu32_mask(k1: __mmask16, a: __m512i, b: __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpneq_epu32_mask&expand=1110) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm256_cmpneq_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::(simd_ne(a.as_u32x8(), b.as_u32x8())) @@ -27615,6 +29527,7 @@ pub unsafe fn _mm256_cmpneq_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpneq_epu32_mask&expand=1111) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm256_mask_cmpneq_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmpneq_epu32_mask(a, b) & k1 @@ -27625,6 +29538,7 @@ pub unsafe fn _mm256_mask_cmpneq_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_epu32_mask&expand=1108) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm_cmpneq_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_ne(a.as_u32x4(), b.as_u32x4())) @@ -27635,6 +29549,7 @@ pub unsafe fn _mm_cmpneq_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpneq_epu32_mask&expand=1109) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud pub unsafe fn _mm_mask_cmpneq_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpneq_epu32_mask(a, b) & k1 @@ -27645,6 +29560,7 @@ pub unsafe fn _mm_mask_cmpneq_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_epu32_mask&expand=721) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn 
_mm512_cmp_epu32_mask( @@ -27664,6 +29580,7 @@ pub unsafe fn _mm512_cmp_epu32_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_epu32_mask&expand=722) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm512_mask_cmp_epu32_mask( @@ -27683,6 +29600,7 @@ pub unsafe fn _mm512_mask_cmp_epu32_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_epu32_mask&expand=719) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm256_cmp_epu32_mask( @@ -27702,6 +29620,7 @@ pub unsafe fn _mm256_cmp_epu32_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_epu32_mask&expand=720) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm256_mask_cmp_epu32_mask( @@ -27721,6 +29640,7 @@ pub unsafe fn _mm256_mask_cmp_epu32_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_epu32_mask&expand=717) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm_cmp_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 { @@ -27737,6 +29657,7 @@ pub unsafe fn _mm_cmp_epu32_mask(a: __m128i, b: __m /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_epu32_mask&expand=718) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm_mask_cmp_epu32_mask( @@ -27756,6 +29677,7 @@ pub unsafe fn _mm_mask_cmp_epu32_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_epi32_mask&expand=1029) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm512_cmplt_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { simd_bitmask::(simd_lt(a.as_i32x16(), b.as_i32x16())) @@ -27766,6 +29688,7 @@ pub unsafe fn _mm512_cmplt_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_epi32_mask&expand=1031) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm512_mask_cmplt_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_cmplt_epi32_mask(a, b) & k1 @@ -27776,6 +29699,7 @@ pub unsafe fn _mm512_mask_cmplt_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmplt_epi32_mask&expand=1027) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm256_cmplt_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::(simd_lt(a.as_i32x8(), b.as_i32x8())) @@ -27786,6 
+29710,7 @@ pub unsafe fn _mm256_cmplt_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmplt_epi32_mask&expand=1028) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm256_mask_cmplt_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmplt_epi32_mask(a, b) & k1 @@ -27796,6 +29721,7 @@ pub unsafe fn _mm256_mask_cmplt_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi32_mask&expand=1025) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm_cmplt_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_lt(a.as_i32x4(), b.as_i32x4())) @@ -27806,6 +29732,7 @@ pub unsafe fn _mm_cmplt_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmplt_epi32_mask&expand=1026) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm_mask_cmplt_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmplt_epi32_mask(a, b) & k1 @@ -27816,6 +29743,7 @@ pub unsafe fn _mm_mask_cmplt_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpgt_epi32_mask&expand=905) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm512_cmpgt_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { simd_bitmask::(simd_gt(a.as_i32x16(), b.as_i32x16())) @@ -27826,6 +29754,7 @@ pub unsafe fn _mm512_cmpgt_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpgt_epi32_mask&expand=906) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm512_mask_cmpgt_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_cmpgt_epi32_mask(a, b) & k1 @@ -27836,6 +29765,7 @@ pub unsafe fn _mm512_mask_cmpgt_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epi32_mask&expand=903) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm256_cmpgt_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::(simd_gt(a.as_i32x8(), b.as_i32x8())) @@ -27846,6 +29776,7 @@ pub unsafe fn _mm256_cmpgt_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpgt_epi32_mask&expand=904) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm256_mask_cmpgt_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmpgt_epi32_mask(a, b) & k1 @@ -27856,6 +29787,7 @@ pub unsafe fn _mm256_mask_cmpgt_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi32_mask&expand=901) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm_cmpgt_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_gt(a.as_i32x4(), b.as_i32x4())) @@ -27866,6 +29798,7 @@ pub unsafe fn _mm_cmpgt_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpgt_epi32_mask&expand=902) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm_mask_cmpgt_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpgt_epi32_mask(a, b) & k1 @@ -27876,6 +29809,7 @@ pub unsafe fn _mm_mask_cmpgt_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_epi32_mask&expand=971) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm512_cmple_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { simd_bitmask::(simd_le(a.as_i32x16(), b.as_i32x16())) @@ -27886,6 +29820,7 @@ pub unsafe fn _mm512_cmple_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_epi32_mask&expand=972) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm512_mask_cmple_epi32_mask(k1: __mmask16, a: 
__m512i, b: __m512i) -> __mmask16 { _mm512_cmple_epi32_mask(a, b) & k1 @@ -27896,6 +29831,7 @@ pub unsafe fn _mm512_mask_cmple_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmple_epi32_mask&expand=969) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm256_cmple_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::(simd_le(a.as_i32x8(), b.as_i32x8())) @@ -27906,6 +29842,7 @@ pub unsafe fn _mm256_cmple_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmple_epi32_mask&expand=970) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm256_mask_cmple_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmple_epi32_mask(a, b) & k1 @@ -27916,6 +29853,7 @@ pub unsafe fn _mm256_mask_cmple_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_epi32_mask&expand=967) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm_cmple_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_le(a.as_i32x4(), b.as_i32x4())) @@ -27926,6 +29864,7 @@ pub unsafe fn _mm_cmple_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmple_epi32_mask&expand=968) #[inline] 
#[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm_mask_cmple_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmple_epi32_mask(a, b) & k1 @@ -27936,6 +29875,7 @@ pub unsafe fn _mm_mask_cmple_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpge_epi32_mask&expand=849) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm512_cmpge_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { simd_bitmask::(simd_ge(a.as_i32x16(), b.as_i32x16())) @@ -27946,6 +29886,7 @@ pub unsafe fn _mm512_cmpge_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpge_epi32_mask&expand=850) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm512_mask_cmpge_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_cmpge_epi32_mask(a, b) & k1 @@ -27956,6 +29897,7 @@ pub unsafe fn _mm512_mask_cmpge_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpge_epi32_mask&expand=847) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm256_cmpge_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::(simd_ge(a.as_i32x8(), b.as_i32x8())) @@ -27966,6 +29908,7 @@ pub unsafe fn 
_mm256_cmpge_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpge_epi32_mask&expand=848) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm256_mask_cmpge_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmpge_epi32_mask(a, b) & k1 @@ -27976,6 +29919,7 @@ pub unsafe fn _mm256_mask_cmpge_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_epi32_mask&expand=845) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm_cmpge_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_ge(a.as_i32x4(), b.as_i32x4())) @@ -27986,6 +29930,7 @@ pub unsafe fn _mm_cmpge_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpge_epi32_mask&expand=846) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm_mask_cmpge_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpge_epi32_mask(a, b) & k1 @@ -27996,6 +29941,7 @@ pub unsafe fn _mm_mask_cmpge_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_epi32_mask&expand=779) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm512_cmpeq_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { simd_bitmask::(simd_eq(a.as_i32x16(), b.as_i32x16())) @@ -28006,6 +29952,7 @@ pub unsafe fn _mm512_cmpeq_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_epi32_mask&expand=780) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm512_mask_cmpeq_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_cmpeq_epi32_mask(a, b) & k1 @@ -28016,6 +29963,7 @@ pub unsafe fn _mm512_mask_cmpeq_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epi32_mask&expand=777) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm256_cmpeq_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::(simd_eq(a.as_i32x8(), b.as_i32x8())) @@ -28026,6 +29974,7 @@ pub unsafe fn _mm256_cmpeq_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpeq_epi32_mask&expand=778) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm256_mask_cmpeq_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmpeq_epi32_mask(a, b) & k1 @@ -28036,6 +29985,7 @@ pub unsafe fn _mm256_mask_cmpeq_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi32_mask&expand=775) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm_cmpeq_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_eq(a.as_i32x4(), b.as_i32x4())) @@ -28046,6 +29996,7 @@ pub unsafe fn _mm_cmpeq_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpeq_epi32_mask&expand=776) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm_mask_cmpeq_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpeq_epi32_mask(a, b) & k1 @@ -28056,6 +30007,7 @@ pub unsafe fn _mm_mask_cmpeq_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_epi32_mask&expand=1088) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm512_cmpneq_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { simd_bitmask::(simd_ne(a.as_i32x16(), b.as_i32x16())) @@ -28066,6 +30018,7 @@ pub unsafe fn _mm512_cmpneq_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_epi32_mask&expand=1089) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm512_mask_cmpneq_epi32_mask(k1: 
__mmask16, a: __m512i, b: __m512i) -> __mmask16 { _mm512_cmpneq_epi32_mask(a, b) & k1 @@ -28076,6 +30029,7 @@ pub unsafe fn _mm512_mask_cmpneq_epi32_mask(k1: __mmask16, a: __m512i, b: __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpneq_epi32_mask&expand=1086) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm256_cmpneq_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::(simd_ne(a.as_i32x8(), b.as_i32x8())) @@ -28086,6 +30040,7 @@ pub unsafe fn _mm256_cmpneq_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpneq_epi32_mask&expand=1087) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm256_mask_cmpneq_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmpneq_epi32_mask(a, b) & k1 @@ -28096,6 +30051,7 @@ pub unsafe fn _mm256_mask_cmpneq_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_epi32_mask&expand=1084) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm_cmpneq_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::(simd_ne(a.as_i32x4(), b.as_i32x4())) @@ -28106,6 +30062,7 @@ pub unsafe fn _mm_cmpneq_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpneq_epi32_mask&expand=1085) 
#[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd pub unsafe fn _mm_mask_cmpneq_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpneq_epi32_mask(a, b) & k1 @@ -28116,6 +30073,7 @@ pub unsafe fn _mm_mask_cmpneq_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_epi32_mask&expand=697) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm512_cmp_epi32_mask( @@ -28135,6 +30093,7 @@ pub unsafe fn _mm512_cmp_epi32_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_epi32_mask&expand=698) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm512_mask_cmp_epi32_mask( @@ -28154,6 +30113,7 @@ pub unsafe fn _mm512_mask_cmp_epi32_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_epi32_mask&expand=695) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm256_cmp_epi32_mask( @@ -28173,6 +30133,7 @@ pub unsafe fn _mm256_cmp_epi32_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_epi32_mask&expand=696) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm256_mask_cmp_epi32_mask( @@ -28192,6 +30153,7 @@ pub unsafe fn _mm256_mask_cmp_epi32_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_epi32_mask&expand=693) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm_cmp_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 { @@ -28208,6 +30170,7 @@ pub unsafe fn _mm_cmp_epi32_mask(a: __m128i, b: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_epi32_mask&expand=694) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm_mask_cmp_epi32_mask( @@ -28227,6 +30190,7 @@ pub unsafe fn _mm_mask_cmp_epi32_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_epu64_mask&expand=1062) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm512_cmplt_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { simd_bitmask::<__m512i, _>(simd_lt(a.as_u64x8(), b.as_u64x8())) @@ -28237,6 +30201,7 @@ pub unsafe fn _mm512_cmplt_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_epu64_mask&expand=1063) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub 
unsafe fn _mm512_mask_cmplt_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_cmplt_epu64_mask(a, b) & k1 @@ -28247,6 +30212,7 @@ pub unsafe fn _mm512_mask_cmplt_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmplt_epu64_mask&expand=1060) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm256_cmplt_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::<__m256i, _>(simd_lt(a.as_u64x4(), b.as_u64x4())) @@ -28257,6 +30223,7 @@ pub unsafe fn _mm256_cmplt_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmplt_epu64_mask&expand=1061) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm256_mask_cmplt_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmplt_epu64_mask(a, b) & k1 @@ -28267,6 +30234,7 @@ pub unsafe fn _mm256_mask_cmplt_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epu64_mask&expand=1058) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm_cmplt_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::<__m128i, _>(simd_lt(a.as_u64x2(), b.as_u64x2())) @@ -28277,6 +30245,7 @@ pub unsafe fn _mm_cmplt_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmplt_epu64_mask&expand=1059) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm_mask_cmplt_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmplt_epu64_mask(a, b) & k1 @@ -28287,6 +30256,7 @@ pub unsafe fn _mm_mask_cmplt_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpgt_epu64_mask&expand=939) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm512_cmpgt_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { simd_bitmask::<__m512i, _>(simd_gt(a.as_u64x8(), b.as_u64x8())) @@ -28297,6 +30267,7 @@ pub unsafe fn _mm512_cmpgt_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpgt_epu64_mask&expand=940) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm512_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_cmpgt_epu64_mask(a, b) & k1 @@ -28307,6 +30278,7 @@ pub unsafe fn _mm512_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epu64_mask&expand=937) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn 
_mm256_cmpgt_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::<__m256i, _>(simd_gt(a.as_u64x4(), b.as_u64x4())) @@ -28317,6 +30289,7 @@ pub unsafe fn _mm256_cmpgt_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpgt_epu64_mask&expand=938) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm256_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmpgt_epu64_mask(a, b) & k1 @@ -28327,6 +30300,7 @@ pub unsafe fn _mm256_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epu64_mask&expand=935) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm_cmpgt_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::<__m128i, _>(simd_gt(a.as_u64x2(), b.as_u64x2())) @@ -28337,6 +30311,7 @@ pub unsafe fn _mm_cmpgt_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpgt_epu64_mask&expand=936) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpgt_epu64_mask(a, b) & k1 @@ -28347,6 +30322,7 @@ pub unsafe fn _mm_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_epu64_mask&expand=1001) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm512_cmple_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { simd_bitmask::<__m512i, _>(simd_le(a.as_u64x8(), b.as_u64x8())) @@ -28357,6 +30333,7 @@ pub unsafe fn _mm512_cmple_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_epu64_mask&expand=1002) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm512_mask_cmple_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_cmple_epu64_mask(a, b) & k1 @@ -28367,6 +30344,7 @@ pub unsafe fn _mm512_mask_cmple_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmple_epu64_mask&expand=999) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm256_cmple_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::<__m256i, _>(simd_le(a.as_u64x4(), b.as_u64x4())) @@ -28377,6 +30355,7 @@ pub unsafe fn _mm256_cmple_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmple_epu64_mask&expand=1000) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn 
_mm256_mask_cmple_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmple_epu64_mask(a, b) & k1 @@ -28387,6 +30366,7 @@ pub unsafe fn _mm256_mask_cmple_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_epu64_mask&expand=997) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm_cmple_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::<__m128i, _>(simd_le(a.as_u64x2(), b.as_u64x2())) @@ -28397,6 +30377,7 @@ pub unsafe fn _mm_cmple_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmple_epu64_mask&expand=998) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm_mask_cmple_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmple_epu64_mask(a, b) & k1 @@ -28407,6 +30388,7 @@ pub unsafe fn _mm_mask_cmple_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpge_epu64_mask&expand=879) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm512_cmpge_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { simd_bitmask::<__m512i, _>(simd_ge(a.as_u64x8(), b.as_u64x8())) @@ -28417,6 +30399,7 @@ pub unsafe fn _mm512_cmpge_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpge_epu64_mask&expand=880) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm512_mask_cmpge_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_cmpge_epu64_mask(a, b) & k1 @@ -28427,6 +30410,7 @@ pub unsafe fn _mm512_mask_cmpge_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpge_epu64_mask&expand=877) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm256_cmpge_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::<__m256i, _>(simd_ge(a.as_u64x4(), b.as_u64x4())) @@ -28437,6 +30421,7 @@ pub unsafe fn _mm256_cmpge_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpge_epu64_mask&expand=878) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm256_mask_cmpge_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmpge_epu64_mask(a, b) & k1 @@ -28447,6 +30432,7 @@ pub unsafe fn _mm256_mask_cmpge_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_epu64_mask&expand=875) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn 
_mm_cmpge_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::<__m128i, _>(simd_ge(a.as_u64x2(), b.as_u64x2())) @@ -28457,6 +30443,7 @@ pub unsafe fn _mm_cmpge_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpge_epu64_mask&expand=876) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm_mask_cmpge_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpge_epu64_mask(a, b) & k1 @@ -28467,6 +30454,7 @@ pub unsafe fn _mm_mask_cmpge_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_epu64_mask&expand=813) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm512_cmpeq_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { simd_bitmask::<__m512i, _>(simd_eq(a.as_u64x8(), b.as_u64x8())) @@ -28477,6 +30465,7 @@ pub unsafe fn _mm512_cmpeq_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_epu64_mask&expand=814) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm512_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_cmpeq_epu64_mask(a, b) & k1 @@ -28487,6 +30476,7 @@ pub unsafe fn _mm512_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epu64_mask&expand=811) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm256_cmpeq_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::<__m256i, _>(simd_eq(a.as_u64x4(), b.as_u64x4())) @@ -28497,6 +30487,7 @@ pub unsafe fn _mm256_cmpeq_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpeq_epu64_mask&expand=812) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm256_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmpeq_epu64_mask(a, b) & k1 @@ -28507,6 +30498,7 @@ pub unsafe fn _mm256_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epu64_mask&expand=809) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm_cmpeq_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::<__m128i, _>(simd_eq(a.as_u64x2(), b.as_u64x2())) @@ -28517,6 +30509,7 @@ pub unsafe fn _mm_cmpeq_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpeq_epu64_mask&expand=810) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn 
_mm_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpeq_epu64_mask(a, b) & k1 @@ -28527,6 +30520,7 @@ pub unsafe fn _mm_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_epu64_mask&expand=1118) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm512_cmpneq_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { simd_bitmask::<__m512i, _>(simd_ne(a.as_u64x8(), b.as_u64x8())) @@ -28537,6 +30531,7 @@ pub unsafe fn _mm512_cmpneq_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_epu64_mask&expand=1119) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm512_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_cmpneq_epu64_mask(a, b) & k1 @@ -28547,6 +30542,7 @@ pub unsafe fn _mm512_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpneq_epu64_mask&expand=1116) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm256_cmpneq_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::<__m256i, _>(simd_ne(a.as_u64x4(), b.as_u64x4())) @@ -28557,6 +30553,7 @@ pub unsafe fn _mm256_cmpneq_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpneq_epu64_mask&expand=1117) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm256_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmpneq_epu64_mask(a, b) & k1 @@ -28567,6 +30564,7 @@ pub unsafe fn _mm256_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_epu64_mask&expand=1114) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm_cmpneq_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::<__m128i, _>(simd_ne(a.as_u64x2(), b.as_u64x2())) @@ -28577,6 +30575,7 @@ pub unsafe fn _mm_cmpneq_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpneq_epu64_mask&expand=1115) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq pub unsafe fn _mm_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpneq_epu64_mask(a, b) & k1 @@ -28587,6 +30586,7 @@ pub unsafe fn _mm_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_epu64_mask&expand=727) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] 
pub unsafe fn _mm512_cmp_epu64_mask( @@ -28606,6 +30606,7 @@ pub unsafe fn _mm512_cmp_epu64_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_epu64_mask&expand=728) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm512_mask_cmp_epu64_mask( @@ -28625,6 +30626,7 @@ pub unsafe fn _mm512_mask_cmp_epu64_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_epu64_mask&expand=725) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm256_cmp_epu64_mask( @@ -28644,6 +30646,7 @@ pub unsafe fn _mm256_cmp_epu64_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_epu64_mask&expand=726) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm256_mask_cmp_epu64_mask( @@ -28663,6 +30666,7 @@ pub unsafe fn _mm256_mask_cmp_epu64_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_epu64_mask&expand=723) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm_cmp_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 { @@ -28679,6 +30683,7 @@ pub unsafe fn _mm_cmp_epu64_mask(a: __m128i, b: __m /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_epu64_mask&expand=724) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm_mask_cmp_epu64_mask( @@ -28698,6 +30703,7 @@ pub unsafe fn _mm_mask_cmp_epu64_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_epi64_mask&expand=1037) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm512_cmplt_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { simd_bitmask::<__m512i, _>(simd_lt(a.as_i64x8(), b.as_i64x8())) @@ -28708,6 +30714,7 @@ pub unsafe fn _mm512_cmplt_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_epi64_mask&expand=1038) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm512_mask_cmplt_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_cmplt_epi64_mask(a, b) & k1 @@ -28718,6 +30725,7 @@ pub unsafe fn _mm512_mask_cmplt_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmplt_epi64_mask&expand=1035) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm256_cmplt_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::<__m256i, _>(simd_lt(a.as_i64x4(), b.as_i64x4())) 
@@ -28728,6 +30736,7 @@ pub unsafe fn _mm256_cmplt_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmplt_epi64_mask&expand=1036) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm256_mask_cmplt_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmplt_epi64_mask(a, b) & k1 @@ -28738,6 +30747,7 @@ pub unsafe fn _mm256_mask_cmplt_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi64_mask&expand=1033) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm_cmplt_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::<__m128i, _>(simd_lt(a.as_i64x2(), b.as_i64x2())) @@ -28748,6 +30758,7 @@ pub unsafe fn _mm_cmplt_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmplt_epi64_mask&expand=1034) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm_mask_cmplt_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmplt_epi64_mask(a, b) & k1 @@ -28758,6 +30769,7 @@ pub unsafe fn _mm_mask_cmplt_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpgt_epi64_mask&expand=913) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm512_cmpgt_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { simd_bitmask::<__m512i, _>(simd_gt(a.as_i64x8(), b.as_i64x8())) @@ -28768,6 +30780,7 @@ pub unsafe fn _mm512_cmpgt_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpgt_epi64_mask&expand=914) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm512_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_cmpgt_epi64_mask(a, b) & k1 @@ -28778,6 +30791,7 @@ pub unsafe fn _mm512_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epi64_mask&expand=911) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm256_cmpgt_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::<__m256i, _>(simd_gt(a.as_i64x4(), b.as_i64x4())) @@ -28788,6 +30802,7 @@ pub unsafe fn _mm256_cmpgt_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpgt_epi64_mask&expand=912) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm256_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmpgt_epi64_mask(a, b) & k1 @@ -28798,6 +30813,7 @@ pub unsafe fn _mm256_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi64_mask&expand=909) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm_cmpgt_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::<__m128i, _>(simd_gt(a.as_i64x2(), b.as_i64x2())) @@ -28808,6 +30824,7 @@ pub unsafe fn _mm_cmpgt_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpgt_epi64_mask&expand=910) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpgt_epi64_mask(a, b) & k1 @@ -28818,6 +30835,7 @@ pub unsafe fn _mm_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_epi64_mask&expand=977) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm512_cmple_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { simd_bitmask::<__m512i, _>(simd_le(a.as_i64x8(), b.as_i64x8())) @@ -28828,6 +30846,7 @@ pub unsafe fn _mm512_cmple_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_epi64_mask&expand=978) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn 
_mm512_mask_cmple_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_cmple_epi64_mask(a, b) & k1 @@ -28838,6 +30857,7 @@ pub unsafe fn _mm512_mask_cmple_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmple_epi64_mask&expand=975) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm256_cmple_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::<__m256i, _>(simd_le(a.as_i64x4(), b.as_i64x4())) @@ -28848,6 +30868,7 @@ pub unsafe fn _mm256_cmple_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmple_epi64_mask&expand=976) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm256_mask_cmple_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmple_epi64_mask(a, b) & k1 @@ -28858,6 +30879,7 @@ pub unsafe fn _mm256_mask_cmple_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_epi64_mask&expand=973) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm_cmple_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::<__m128i, _>(simd_le(a.as_i64x2(), b.as_i64x2())) @@ -28868,6 +30890,7 @@ pub unsafe fn _mm_cmple_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmple_epi64_mask&expand=974) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm_mask_cmple_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmple_epi64_mask(a, b) & k1 @@ -28878,6 +30901,7 @@ pub unsafe fn _mm_mask_cmple_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpge_epi64_mask&expand=855) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm512_cmpge_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { simd_bitmask::<__m512i, _>(simd_ge(a.as_i64x8(), b.as_i64x8())) @@ -28888,6 +30912,7 @@ pub unsafe fn _mm512_cmpge_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpge_epi64_mask&expand=856) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm512_mask_cmpge_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_cmpge_epi64_mask(a, b) & k1 @@ -28898,6 +30923,7 @@ pub unsafe fn _mm512_mask_cmpge_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpge_epi64_mask&expand=853) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn 
_mm256_cmpge_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::<__m256i, _>(simd_ge(a.as_i64x4(), b.as_i64x4())) @@ -28908,6 +30934,7 @@ pub unsafe fn _mm256_cmpge_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpge_epi64_mask&expand=854) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm256_mask_cmpge_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmpge_epi64_mask(a, b) & k1 @@ -28918,6 +30945,7 @@ pub unsafe fn _mm256_mask_cmpge_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_epi64_mask&expand=851) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm_cmpge_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::<__m128i, _>(simd_ge(a.as_i64x2(), b.as_i64x2())) @@ -28928,6 +30956,7 @@ pub unsafe fn _mm_cmpge_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpge_epi64_mask&expand=852) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm_mask_cmpge_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpge_epi64_mask(a, b) & k1 @@ -28938,6 +30967,7 @@ pub unsafe fn _mm_mask_cmpge_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_epi64_mask&expand=787) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm512_cmpeq_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { simd_bitmask::<__m512i, _>(simd_eq(a.as_i64x8(), b.as_i64x8())) @@ -28948,6 +30978,7 @@ pub unsafe fn _mm512_cmpeq_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_epi64_mask&expand=788) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm512_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_cmpeq_epi64_mask(a, b) & k1 @@ -28958,6 +30989,7 @@ pub unsafe fn _mm512_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epi64_mask&expand=785) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm256_cmpeq_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::<__m256i, _>(simd_eq(a.as_i64x4(), b.as_i64x4())) @@ -28968,6 +31000,7 @@ pub unsafe fn _mm256_cmpeq_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpeq_epi64_mask&expand=786) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn 
_mm256_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmpeq_epi64_mask(a, b) & k1 @@ -28978,6 +31011,7 @@ pub unsafe fn _mm256_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi64_mask&expand=783) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm_cmpeq_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::<__m128i, _>(simd_eq(a.as_i64x2(), b.as_i64x2())) @@ -28988,6 +31022,7 @@ pub unsafe fn _mm_cmpeq_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpeq_epi64_mask&expand=784) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpeq_epi64_mask(a, b) & k1 @@ -28998,6 +31033,7 @@ pub unsafe fn _mm_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_epi64_mask&expand=1094) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm512_cmpneq_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { simd_bitmask::<__m512i, _>(simd_ne(a.as_i64x8(), b.as_i64x8())) @@ -29008,6 +31044,7 @@ pub unsafe fn _mm512_cmpneq_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_epi64_mask&expand=1095) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm512_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 { _mm512_cmpneq_epi64_mask(a, b) & k1 @@ -29018,6 +31055,7 @@ pub unsafe fn _mm512_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpneq_epi64_mask&expand=1092) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm256_cmpneq_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { simd_bitmask::<__m256i, _>(simd_ne(a.as_i64x4(), b.as_i64x4())) @@ -29028,6 +31066,7 @@ pub unsafe fn _mm256_cmpneq_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpneq_epi64_mask&expand=1093) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm256_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i) -> __mmask8 { _mm256_cmpneq_epi64_mask(a, b) & k1 @@ -29038,6 +31077,7 @@ pub unsafe fn _mm256_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_epi64_mask&expand=1090) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe 
fn _mm_cmpneq_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { simd_bitmask::<__m128i, _>(simd_ne(a.as_i64x2(), b.as_i64x2())) @@ -29048,6 +31088,7 @@ pub unsafe fn _mm_cmpneq_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpneq_epi64_mask&expand=1091) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq pub unsafe fn _mm_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -> __mmask8 { _mm_cmpneq_epi64_mask(a, b) & k1 @@ -29058,6 +31099,7 @@ pub unsafe fn _mm_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_epi64_mask&expand=703) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm512_cmp_epi64_mask( @@ -29077,6 +31119,7 @@ pub unsafe fn _mm512_cmp_epi64_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_epi64_mask&expand=704) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm512_mask_cmp_epi64_mask( @@ -29096,6 +31139,7 @@ pub unsafe fn _mm512_mask_cmp_epi64_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_epi64_mask&expand=701) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 
0))] pub unsafe fn _mm256_cmp_epi64_mask( @@ -29115,6 +31159,7 @@ pub unsafe fn _mm256_cmp_epi64_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_epi64_mask&expand=702) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm256_mask_cmp_epi64_mask( @@ -29134,6 +31179,7 @@ pub unsafe fn _mm256_mask_cmp_epi64_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_epi64_mask&expand=699) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(2)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm_cmp_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 { @@ -29150,6 +31196,7 @@ pub unsafe fn _mm_cmp_epi64_mask(a: __m128i, b: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_epi64_mask&expand=700) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[rustc_legacy_const_generics(3)] #[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))] pub unsafe fn _mm_mask_cmp_epi64_mask( @@ -29169,6 +31216,7 @@ pub unsafe fn _mm_mask_cmp_epi64_mask( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_add_epi32&expand=4556) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_add_epi32(a: __m512i) -> i32 { simd_reduce_add_unordered(a.as_i32x16()) } @@ -29178,6 +31226,7 @@ pub unsafe fn _mm512_reduce_add_epi32(a: __m512i) -> i32 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_add_epi32&expand=4555) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_add_epi32(k: __mmask16, a: __m512i) -> i32 { simd_reduce_add_unordered(simd_select_bitmask( k, @@ -29191,6 +31240,7 @@ pub unsafe fn _mm512_mask_reduce_add_epi32(k: __mmask16, a: __m512i) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_add_epi64&expand=4558) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_add_epi64(a: __m512i) -> i64 { simd_reduce_add_unordered(a.as_i64x8()) } @@ -29200,6 +31250,7 @@ pub unsafe fn _mm512_reduce_add_epi64(a: __m512i) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_add_epi64&expand=4557) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_add_epi64(k: __mmask8, a: __m512i) -> i64 { simd_reduce_add_unordered(simd_select_bitmask( k, @@ -29213,6 +31264,7 @@ pub unsafe fn _mm512_mask_reduce_add_epi64(k: __mmask8, a: __m512i) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_add_ps&expand=4562) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_add_ps(a: __m512) -> f32 { simd_reduce_add_unordered(a.as_f32x16()) } @@ -29222,6 +31274,7 @@ pub unsafe fn _mm512_reduce_add_ps(a: __m512) -> f32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_add_ps&expand=4561) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_add_ps(k: __mmask16, a: __m512) -> f32 { simd_reduce_add_unordered(simd_select_bitmask( k, @@ -29235,6 +31288,7 @@ pub unsafe fn _mm512_mask_reduce_add_ps(k: __mmask16, a: __m512) -> f32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_add_pd&expand=4560) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_add_pd(a: __m512d) -> f64 { simd_reduce_add_unordered(a.as_f64x8()) } @@ -29244,6 +31298,7 @@ pub unsafe fn _mm512_reduce_add_pd(a: __m512d) -> f64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_add_pd&expand=4559) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_add_pd(k: __mmask8, a: __m512d) -> f64 { simd_reduce_add_unordered(simd_select_bitmask( k, @@ -29257,6 +31312,7 @@ pub unsafe fn _mm512_mask_reduce_add_pd(k: __mmask8, a: __m512d) -> f64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_mul_epi32&expand=4600) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_mul_epi32(a: __m512i) -> i32 { simd_reduce_mul_unordered(a.as_i32x16()) } @@ -29266,6 +31322,7 @@ pub unsafe fn _mm512_reduce_mul_epi32(a: __m512i) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_mul_epi32&expand=4599) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_mul_epi32(k: __mmask16, a: __m512i) -> 
i32 { simd_reduce_mul_unordered(simd_select_bitmask( k, @@ -29279,6 +31336,7 @@ pub unsafe fn _mm512_mask_reduce_mul_epi32(k: __mmask16, a: __m512i) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_mul_epi64&expand=4602) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_mul_epi64(a: __m512i) -> i64 { simd_reduce_mul_unordered(a.as_i64x8()) } @@ -29288,6 +31346,7 @@ pub unsafe fn _mm512_reduce_mul_epi64(a: __m512i) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_mul_epi64&expand=4601) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_mul_epi64(k: __mmask8, a: __m512i) -> i64 { simd_reduce_mul_unordered(simd_select_bitmask( k, @@ -29301,6 +31360,7 @@ pub unsafe fn _mm512_mask_reduce_mul_epi64(k: __mmask8, a: __m512i) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_mul_ps&expand=4606) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_mul_ps(a: __m512) -> f32 { simd_reduce_mul_unordered(a.as_f32x16()) } @@ -29310,6 +31370,7 @@ pub unsafe fn _mm512_reduce_mul_ps(a: __m512) -> f32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_mul_ps&expand=4605) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_mul_ps(k: __mmask16, a: __m512) -> f32 { simd_reduce_mul_unordered(simd_select_bitmask( k, @@ -29323,6 +31384,7 @@ pub unsafe fn _mm512_mask_reduce_mul_ps(k: __mmask16, a: __m512) -> f32 { /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_mul_pd&expand=4604) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_mul_pd(a: __m512d) -> f64 { simd_reduce_mul_unordered(a.as_f64x8()) } @@ -29332,6 +31394,7 @@ pub unsafe fn _mm512_reduce_mul_pd(a: __m512d) -> f64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_mul_pd&expand=4603) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_mul_pd(k: __mmask8, a: __m512d) -> f64 { simd_reduce_mul_unordered(simd_select_bitmask( k, @@ -29345,6 +31408,7 @@ pub unsafe fn _mm512_mask_reduce_mul_pd(k: __mmask8, a: __m512d) -> f64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_max_epi32&expand=4576) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_max_epi32(a: __m512i) -> i32 { simd_reduce_max(a.as_i32x16()) } @@ -29354,6 +31418,7 @@ pub unsafe fn _mm512_reduce_max_epi32(a: __m512i) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_max_epi32&expand=4575) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_max_epi32(k: __mmask16, a: __m512i) -> i32 { simd_reduce_max(simd_select_bitmask( k, @@ -29367,6 +31432,7 @@ pub unsafe fn _mm512_mask_reduce_max_epi32(k: __mmask16, a: __m512i) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_max_epi64&expand=4578) #[inline] #[target_feature(enable = 
"avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_max_epi64(a: __m512i) -> i64 { simd_reduce_max(a.as_i64x8()) } @@ -29376,6 +31442,7 @@ pub unsafe fn _mm512_reduce_max_epi64(a: __m512i) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_max_epi64&expand=4577) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_max_epi64(k: __mmask8, a: __m512i) -> i64 { simd_reduce_max(simd_select_bitmask( k, @@ -29389,6 +31456,7 @@ pub unsafe fn _mm512_mask_reduce_max_epi64(k: __mmask8, a: __m512i) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_max_epu32&expand=4580) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_max_epu32(a: __m512i) -> u32 { simd_reduce_max(a.as_u32x16()) } @@ -29398,6 +31466,7 @@ pub unsafe fn _mm512_reduce_max_epu32(a: __m512i) -> u32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_max_epu32&expand=4579) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_max_epu32(k: __mmask16, a: __m512i) -> u32 { simd_reduce_max(simd_select_bitmask( k, @@ -29411,6 +31480,7 @@ pub unsafe fn _mm512_mask_reduce_max_epu32(k: __mmask16, a: __m512i) -> u32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_max_epu64&expand=4582) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_max_epu64(a: __m512i) -> u64 { simd_reduce_max(a.as_u64x8()) } @@ -29420,6 +31490,7 
@@ pub unsafe fn _mm512_reduce_max_epu64(a: __m512i) -> u64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_max_epu64&expand=4581) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_max_epu64(k: __mmask8, a: __m512i) -> u64 { simd_reduce_max(simd_select_bitmask( k, @@ -29433,6 +31504,7 @@ pub unsafe fn _mm512_mask_reduce_max_epu64(k: __mmask8, a: __m512i) -> u64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_max_ps&expand=4586) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_max_ps(a: __m512) -> f32 { simd_reduce_max(a.as_f32x16()) } @@ -29442,6 +31514,7 @@ pub unsafe fn _mm512_reduce_max_ps(a: __m512) -> f32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_max_ps&expand=4585) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_max_ps(k: __mmask16, a: __m512) -> f32 { simd_reduce_max(simd_select_bitmask( k, @@ -29455,6 +31528,7 @@ pub unsafe fn _mm512_mask_reduce_max_ps(k: __mmask16, a: __m512) -> f32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_max_pd&expand=4584) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_max_pd(a: __m512d) -> f64 { simd_reduce_max(a.as_f64x8()) } @@ -29464,6 +31538,7 @@ pub unsafe fn _mm512_reduce_max_pd(a: __m512d) -> f64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_max_pd&expand=4583) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_max_pd(k: __mmask8, a: __m512d) -> f64 { simd_reduce_max(simd_select_bitmask( k, @@ -29477,6 +31552,7 @@ pub unsafe fn _mm512_mask_reduce_max_pd(k: __mmask8, a: __m512d) -> f64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_min_epi32&expand=4588) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_min_epi32(a: __m512i) -> i32 { simd_reduce_min(a.as_i32x16()) } @@ -29486,6 +31562,7 @@ pub unsafe fn _mm512_reduce_min_epi32(a: __m512i) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_min_epi32&expand=4587) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_min_epi32(k: __mmask16, a: __m512i) -> i32 { simd_reduce_min(simd_select_bitmask( k, @@ -29499,6 +31576,7 @@ pub unsafe fn _mm512_mask_reduce_min_epi32(k: __mmask16, a: __m512i) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_min_epi64&expand=4590) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_min_epi64(a: __m512i) -> i64 { simd_reduce_min(a.as_i64x8()) } @@ -29508,6 +31586,7 @@ pub unsafe fn _mm512_reduce_min_epi64(a: __m512i) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_min_epi64&expand=4589) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_min_epi64(k: __mmask8, a: __m512i) -> i64 { 
simd_reduce_min(simd_select_bitmask( k, @@ -29521,6 +31600,7 @@ pub unsafe fn _mm512_mask_reduce_min_epi64(k: __mmask8, a: __m512i) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_min_epu32&expand=4592) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_min_epu32(a: __m512i) -> u32 { simd_reduce_min(a.as_u32x16()) } @@ -29530,6 +31610,7 @@ pub unsafe fn _mm512_reduce_min_epu32(a: __m512i) -> u32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_min_epu32&expand=4591) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_min_epu32(k: __mmask16, a: __m512i) -> u32 { simd_reduce_min(simd_select_bitmask( k, @@ -29543,6 +31624,7 @@ pub unsafe fn _mm512_mask_reduce_min_epu32(k: __mmask16, a: __m512i) -> u32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_min_epu64&expand=4594) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_min_epu64(a: __m512i) -> u64 { simd_reduce_min(a.as_u64x8()) } @@ -29552,6 +31634,7 @@ pub unsafe fn _mm512_reduce_min_epu64(a: __m512i) -> u64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_min_epi64&expand=4589) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_min_epu64(k: __mmask8, a: __m512i) -> u64 { simd_reduce_min(simd_select_bitmask( k, @@ -29565,6 +31648,7 @@ pub unsafe fn _mm512_mask_reduce_min_epu64(k: __mmask8, a: __m512i) -> u64 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_min_ps&expand=4598) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_min_ps(a: __m512) -> f32 { simd_reduce_min(a.as_f32x16()) } @@ -29574,6 +31658,7 @@ pub unsafe fn _mm512_reduce_min_ps(a: __m512) -> f32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_min_ps&expand=4597) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_min_ps(k: __mmask16, a: __m512) -> f32 { simd_reduce_min(simd_select_bitmask( k, @@ -29587,6 +31672,7 @@ pub unsafe fn _mm512_mask_reduce_min_ps(k: __mmask16, a: __m512) -> f32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_min_pd&expand=4596) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_min_pd(a: __m512d) -> f64 { simd_reduce_min(a.as_f64x8()) } @@ -29596,6 +31682,7 @@ pub unsafe fn _mm512_reduce_min_pd(a: __m512d) -> f64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_min_pd&expand=4595) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_min_pd(k: __mmask8, a: __m512d) -> f64 { simd_reduce_min(simd_select_bitmask( k, @@ -29609,6 +31696,7 @@ pub unsafe fn _mm512_mask_reduce_min_pd(k: __mmask8, a: __m512d) -> f64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_and_epi32&expand=4564) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] pub unsafe fn _mm512_reduce_and_epi32(a: __m512i) -> i32 { simd_reduce_and(a.as_i32x16()) } @@ -29618,6 +31706,7 @@ pub unsafe fn _mm512_reduce_and_epi32(a: __m512i) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_and_epi32&expand=4563) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_and_epi32(k: __mmask16, a: __m512i) -> i32 { simd_reduce_and(simd_select_bitmask( k, @@ -29631,6 +31720,7 @@ pub unsafe fn _mm512_mask_reduce_and_epi32(k: __mmask16, a: __m512i) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_and_epi64&expand=4566) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_and_epi64(a: __m512i) -> i64 { simd_reduce_and(a.as_i64x8()) } @@ -29640,6 +31730,7 @@ pub unsafe fn _mm512_reduce_and_epi64(a: __m512i) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_add_epi64&expand=4557) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_and_epi64(k: __mmask8, a: __m512i) -> i64 { simd_reduce_and(simd_select_bitmask( k, @@ -29654,6 +31745,7 @@ pub unsafe fn _mm512_mask_reduce_and_epi64(k: __mmask8, a: __m512i) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_or_epi32&expand=4608) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_or_epi32(a: __m512i) -> i32 { simd_reduce_or(a.as_i32x16()) } @@ -29663,6 +31755,7 @@ pub unsafe fn _mm512_reduce_or_epi32(a: __m512i) -> 
i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_or_epi32&expand=4607) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_or_epi32(k: __mmask16, a: __m512i) -> i32 { simd_reduce_or(simd_select_bitmask( k, @@ -29676,6 +31769,7 @@ pub unsafe fn _mm512_mask_reduce_or_epi32(k: __mmask16, a: __m512i) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_or_epi64&expand=4610) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_reduce_or_epi64(a: __m512i) -> i64 { simd_reduce_or(a.as_i64x8()) } @@ -29685,6 +31779,7 @@ pub unsafe fn _mm512_reduce_or_epi64(a: __m512i) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_or_epi64&expand=4609) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_reduce_or_epi64(k: __mmask8, a: __m512i) -> i64 { simd_reduce_or(simd_select_bitmask( k, @@ -29700,6 +31795,7 @@ pub unsafe fn _mm512_mask_reduce_or_epi64(k: __mmask8, a: __m512i) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_undefined_pd) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] // This intrinsic has no corresponding instruction. 
pub unsafe fn _mm512_undefined_pd() -> __m512d { _mm512_set1_pd(0.0) @@ -29712,6 +31808,7 @@ pub unsafe fn _mm512_undefined_pd() -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_undefined_ps) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] // This intrinsic has no corresponding instruction. pub unsafe fn _mm512_undefined_ps() -> __m512 { _mm512_set1_ps(0.0) @@ -29724,6 +31821,7 @@ pub unsafe fn _mm512_undefined_ps() -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_undefined_epi32&expand=5995) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] // This intrinsic has no corresponding instruction. pub unsafe fn _mm512_undefined_epi32() -> __m512i { _mm512_set1_epi32(0) @@ -29736,6 +31834,7 @@ pub unsafe fn _mm512_undefined_epi32() -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_undefined&expand=5994) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] // This intrinsic has no corresponding instruction. 
pub unsafe fn _mm512_undefined() -> __m512 { _mm512_set1_ps(0.0) @@ -29746,6 +31845,7 @@ pub unsafe fn _mm512_undefined() -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_loadu_epi32&expand=3377) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32 pub unsafe fn _mm512_loadu_epi32(mem_addr: *const i32) -> __m512i { ptr::read_unaligned(mem_addr as *const __m512i) @@ -29756,6 +31856,7 @@ pub unsafe fn _mm512_loadu_epi32(mem_addr: *const i32) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_loadu_epi32&expand=3374) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32 pub unsafe fn _mm256_loadu_epi32(mem_addr: *const i32) -> __m256i { ptr::read_unaligned(mem_addr as *const __m256i) @@ -29766,6 +31867,7 @@ pub unsafe fn _mm256_loadu_epi32(mem_addr: *const i32) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_epi32&expand=3371) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32 pub unsafe fn _mm_loadu_epi32(mem_addr: *const i32) -> __m128i { ptr::read_unaligned(mem_addr as *const __m128i) @@ -29776,6 +31878,7 @@ pub unsafe fn _mm_loadu_epi32(mem_addr: *const i32) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi32_storeu_epi16&expand=1460) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpmovdw))] pub unsafe fn _mm512_mask_cvtepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask16, a: __m512i) { vpmovdwmem(mem_addr, a.as_i32x16(), k); @@ -29786,6 +31889,7 @@ pub unsafe fn _mm512_mask_cvtepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask16, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi32_storeu_epi8&expand=1462) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdw))] pub unsafe fn _mm256_mask_cvtepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m256i) { vpmovdwmem256(mem_addr, a.as_i32x8(), k); @@ -29796,6 +31900,7 @@ pub unsafe fn _mm256_mask_cvtepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi32_storeu_epi8&expand=1461) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdw))] pub unsafe fn _mm_mask_cvtepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovdwmem128(mem_addr, a.as_i32x4(), k); @@ -29806,6 +31911,7 @@ pub unsafe fn _mm_mask_cvtepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi32_storeu_epi16&expand=1833) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdw))] pub unsafe fn _mm512_mask_cvtsepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask16, a: __m512i) { vpmovsdwmem(mem_addr, a.as_i32x16(), k); @@ -29816,6 +31922,7 @@ pub unsafe fn _mm512_mask_cvtsepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask16 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi32_storeu_epi16&expand=1832) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdw))] pub unsafe fn _mm256_mask_cvtsepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m256i) { vpmovsdwmem256(mem_addr, a.as_i32x8(), k); @@ -29826,6 +31933,7 @@ pub unsafe fn _mm256_mask_cvtsepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi32_storeu_epi16&expand=1831) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdw))] pub unsafe fn _mm_mask_cvtsepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovsdwmem128(mem_addr, a.as_i32x4(), k); @@ -29836,6 +31944,7 @@ pub unsafe fn _mm_mask_cvtsepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi32_storeu_epi16&expand=2068) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdw))] pub unsafe fn _mm512_mask_cvtusepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask16, a: __m512i) { vpmovusdwmem(mem_addr, a.as_i32x16(), k); @@ -29846,6 +31955,7 @@ pub unsafe fn _mm512_mask_cvtusepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi32_storeu_epi16&expand=2067) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdw))] pub unsafe fn 
_mm256_mask_cvtusepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m256i) { vpmovusdwmem256(mem_addr, a.as_i32x8(), k); @@ -29856,6 +31966,7 @@ pub unsafe fn _mm256_mask_cvtusepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi32_storeu_epi16&expand=2066) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdw))] pub unsafe fn _mm_mask_cvtusepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovusdwmem128(mem_addr, a.as_i32x4(), k); @@ -29866,6 +31977,7 @@ pub unsafe fn _mm_mask_cvtusepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi32_storeu_epi8&expand=1463) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdb))] pub unsafe fn _mm512_mask_cvtepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask16, a: __m512i) { vpmovdbmem(mem_addr, a.as_i32x16(), k); @@ -29876,6 +31988,7 @@ pub unsafe fn _mm512_mask_cvtepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask16, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi32_storeu_epi8&expand=1462) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdb))] pub unsafe fn _mm256_mask_cvtepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m256i) { vpmovdbmem256(mem_addr, a.as_i32x8(), k); @@ -29886,6 +31999,7 @@ pub unsafe fn _mm256_mask_cvtepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi32_storeu_epi8&expand=1461) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovdb))] pub unsafe fn _mm_mask_cvtepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovdbmem128(mem_addr, a.as_i32x4(), k); @@ -29896,6 +32010,7 @@ pub unsafe fn _mm_mask_cvtepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi32_storeu_epi8&expand=1836) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdb))] pub unsafe fn _mm512_mask_cvtsepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask16, a: __m512i) { vpmovsdbmem(mem_addr, a.as_i32x16(), k); @@ -29906,6 +32021,7 @@ pub unsafe fn _mm512_mask_cvtsepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask16, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi32_storeu_epi8&expand=1835) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdb))] pub unsafe fn _mm256_mask_cvtsepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m256i) { vpmovsdbmem256(mem_addr, a.as_i32x8(), k); @@ -29916,6 +32032,7 @@ pub unsafe fn _mm256_mask_cvtsepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi32_storeu_epi8&expand=1834) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsdb))] pub unsafe fn 
_mm_mask_cvtsepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovsdbmem128(mem_addr, a.as_i32x4(), k); @@ -29926,6 +32043,7 @@ pub unsafe fn _mm_mask_cvtsepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi32_storeu_epi8&expand=2071) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdb))] pub unsafe fn _mm512_mask_cvtusepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask16, a: __m512i) { vpmovusdbmem(mem_addr, a.as_i32x16(), k); @@ -29936,6 +32054,7 @@ pub unsafe fn _mm512_mask_cvtusepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask16 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi32_storeu_epi8&expand=2070) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdb))] pub unsafe fn _mm256_mask_cvtusepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m256i) { vpmovusdbmem256(mem_addr, a.as_i32x8(), k); @@ -29946,6 +32065,7 @@ pub unsafe fn _mm256_mask_cvtusepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi32_storeu_epi8&expand=2069) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusdb))] pub unsafe fn _mm_mask_cvtusepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovusdbmem128(mem_addr, a.as_i32x4(), k); @@ -29956,6 +32076,7 @@ pub unsafe fn _mm_mask_cvtusepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi64_storeu_epi16&expand=1513) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqw))] pub unsafe fn _mm512_mask_cvtepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m512i) { vpmovqwmem(mem_addr, a.as_i64x8(), k); @@ -29966,6 +32087,7 @@ pub unsafe fn _mm512_mask_cvtepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi64_storeu_epi16&expand=1512) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqw))] pub unsafe fn _mm256_mask_cvtepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m256i) { vpmovqwmem256(mem_addr, a.as_i64x4(), k); @@ -29976,6 +32098,7 @@ pub unsafe fn _mm256_mask_cvtepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi64_storeu_epi16&expand=1511) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqw))] pub unsafe fn _mm_mask_cvtepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovqwmem128(mem_addr, a.as_i64x2(), k); @@ -29986,6 +32109,7 @@ pub unsafe fn _mm_mask_cvtepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi64_storeu_epi16&expand=1866) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqw))] pub unsafe fn _mm512_mask_cvtsepi64_storeu_epi16(mem_addr: 
*mut i8, k: __mmask8, a: __m512i) { vpmovsqwmem(mem_addr, a.as_i64x8(), k); @@ -29996,6 +32120,7 @@ pub unsafe fn _mm512_mask_cvtsepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi64_storeu_epi16&expand=1865) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqw))] pub unsafe fn _mm256_mask_cvtsepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m256i) { vpmovsqwmem256(mem_addr, a.as_i64x4(), k); @@ -30006,6 +32131,7 @@ pub unsafe fn _mm256_mask_cvtsepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi64_storeu_epi16&expand=1864) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqw))] pub unsafe fn _mm_mask_cvtsepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovsqwmem128(mem_addr, a.as_i64x2(), k); @@ -30016,6 +32142,7 @@ pub unsafe fn _mm_mask_cvtsepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi64_storeu_epi16&expand=2101) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqw))] pub unsafe fn _mm512_mask_cvtusepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m512i) { vpmovusqwmem(mem_addr, a.as_i64x8(), k); @@ -30026,6 +32153,7 @@ pub unsafe fn _mm512_mask_cvtusepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi64_storeu_epi16&expand=2100) 
#[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqw))] pub unsafe fn _mm256_mask_cvtusepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m256i) { vpmovusqwmem256(mem_addr, a.as_i64x4(), k); @@ -30036,6 +32164,7 @@ pub unsafe fn _mm256_mask_cvtusepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi64_storeu_epi16&expand=2099) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqw))] pub unsafe fn _mm_mask_cvtusepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovusqwmem128(mem_addr, a.as_i64x2(), k); @@ -30046,6 +32175,7 @@ pub unsafe fn _mm_mask_cvtusepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi64_storeu_epi8&expand=1519) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqb))] pub unsafe fn _mm512_mask_cvtepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m512i) { vpmovqbmem(mem_addr, a.as_i64x8(), k); @@ -30056,6 +32186,7 @@ pub unsafe fn _mm512_mask_cvtepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi64_storeu_epi8&expand=1518) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqb))] pub unsafe fn _mm256_mask_cvtepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m256i) { vpmovqbmem256(mem_addr, a.as_i64x4(), k); @@ -30066,6 +32197,7 @@ pub unsafe fn 
_mm256_mask_cvtepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi64_storeu_epi8&expand=1517) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqb))] pub unsafe fn _mm_mask_cvtepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovqbmem128(mem_addr, a.as_i64x2(), k); @@ -30076,6 +32208,7 @@ pub unsafe fn _mm_mask_cvtepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi64_storeu_epi8&expand=1872) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqb))] pub unsafe fn _mm512_mask_cvtsepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m512i) { vpmovsqbmem(mem_addr, a.as_i64x8(), k); @@ -30086,6 +32219,7 @@ pub unsafe fn _mm512_mask_cvtsepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi64_storeu_epi8&expand=1871) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqb))] pub unsafe fn _mm256_mask_cvtsepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m256i) { vpmovsqbmem256(mem_addr, a.as_i64x4(), k); @@ -30096,6 +32230,7 @@ pub unsafe fn _mm256_mask_cvtsepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi64_storeu_epi8&expand=1870) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vpmovsqb))] pub unsafe fn _mm_mask_cvtsepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovsqbmem128(mem_addr, a.as_i64x2(), k); @@ -30106,6 +32241,7 @@ pub unsafe fn _mm_mask_cvtsepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi64_storeu_epi8&expand=2107) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqb))] pub unsafe fn _mm512_mask_cvtusepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m512i) { vpmovusqbmem(mem_addr, a.as_i64x8(), k); @@ -30116,6 +32252,7 @@ pub unsafe fn _mm512_mask_cvtusepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi64_storeu_epi8&expand=2106) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqb))] pub unsafe fn _mm256_mask_cvtusepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m256i) { vpmovusqbmem256(mem_addr, a.as_i64x4(), k); @@ -30126,6 +32263,7 @@ pub unsafe fn _mm256_mask_cvtusepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi64_storeu_epi8&expand=2105) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqb))] pub unsafe fn _mm_mask_cvtusepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovusqbmem128(mem_addr, a.as_i64x2(), k); @@ -30136,6 +32274,7 @@ pub unsafe fn _mm_mask_cvtusepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi64_storeu_epi32&expand=1516) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqd))] pub unsafe fn _mm512_mask_cvtepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m512i) { vpmovqdmem(mem_addr, a.as_i64x8(), k); @@ -30146,6 +32285,7 @@ pub unsafe fn _mm512_mask_cvtepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi64_storeu_epi32&expand=1515) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqd))] pub unsafe fn _mm256_mask_cvtepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m256i) { vpmovqdmem256(mem_addr, a.as_i64x4(), k); @@ -30156,6 +32296,7 @@ pub unsafe fn _mm256_mask_cvtepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi64_storeu_epi32&expand=1514) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovqd))] pub unsafe fn _mm_mask_cvtepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovqdmem128(mem_addr, a.as_i64x2(), k); @@ -30166,6 +32307,7 @@ pub unsafe fn _mm_mask_cvtepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi64_storeu_epi32&expand=1869) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqd))] pub unsafe fn _mm512_mask_cvtsepi64_storeu_epi32(mem_addr: 
*mut i8, k: __mmask8, a: __m512i) { vpmovsqdmem(mem_addr, a.as_i64x8(), k); @@ -30176,6 +32318,7 @@ pub unsafe fn _mm512_mask_cvtsepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi64_storeu_epi32&expand=1868) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqd))] pub unsafe fn _mm256_mask_cvtsepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m256i) { vpmovsqdmem256(mem_addr, a.as_i64x4(), k); @@ -30186,6 +32329,7 @@ pub unsafe fn _mm256_mask_cvtsepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi64_storeu_epi32&expand=1867) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovsqd))] pub unsafe fn _mm_mask_cvtsepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovsqdmem128(mem_addr, a.as_i64x2(), k); @@ -30196,6 +32340,7 @@ pub unsafe fn _mm_mask_cvtsepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi64_storeu_epi32&expand=2104) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqd))] pub unsafe fn _mm512_mask_cvtusepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m512i) { vpmovusqdmem(mem_addr, a.as_i64x8(), k); @@ -30206,6 +32351,7 @@ pub unsafe fn _mm512_mask_cvtusepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi64_storeu_epi32&expand=2103) 
#[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqd))] pub unsafe fn _mm256_mask_cvtusepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m256i) { vpmovusqdmem256(mem_addr, a.as_i64x4(), k); @@ -30216,6 +32362,7 @@ pub unsafe fn _mm256_mask_cvtusepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi64_storeu_epi32&expand=2102) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmovusqd))] pub unsafe fn _mm_mask_cvtusepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a: __m128i) { vpmovusqdmem128(mem_addr, a.as_i64x2(), k); @@ -30226,6 +32373,7 @@ pub unsafe fn _mm_mask_cvtusepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_storeu_epi32&expand=5628) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32 pub unsafe fn _mm512_storeu_epi32(mem_addr: *mut i32, a: __m512i) { ptr::write_unaligned(mem_addr as *mut __m512i, a); @@ -30236,6 +32384,7 @@ pub unsafe fn _mm512_storeu_epi32(mem_addr: *mut i32, a: __m512i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_storeu_epi32&expand=5626) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32 pub unsafe fn _mm256_storeu_epi32(mem_addr: *mut i32, a: __m256i) { ptr::write_unaligned(mem_addr as *mut __m256i, a); @@ -30246,6 +32395,7 @@ pub unsafe fn _mm256_storeu_epi32(mem_addr: 
*mut i32, a: __m256i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_epi32&expand=5624) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32 pub unsafe fn _mm_storeu_epi32(mem_addr: *mut i32, a: __m128i) { ptr::write_unaligned(mem_addr as *mut __m128i, a); @@ -30256,6 +32406,7 @@ pub unsafe fn _mm_storeu_epi32(mem_addr: *mut i32, a: __m128i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_loadu_epi64&expand=3386) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64 pub unsafe fn _mm512_loadu_epi64(mem_addr: *const i64) -> __m512i { ptr::read_unaligned(mem_addr as *const __m512i) @@ -30266,6 +32417,7 @@ pub unsafe fn _mm512_loadu_epi64(mem_addr: *const i64) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_loadu_epi64&expand=3383) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64 pub unsafe fn _mm256_loadu_epi64(mem_addr: *const i64) -> __m256i { ptr::read_unaligned(mem_addr as *const __m256i) @@ -30276,6 +32428,7 @@ pub unsafe fn _mm256_loadu_epi64(mem_addr: *const i64) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_epi64&expand=3380) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64 pub unsafe fn _mm_loadu_epi64(mem_addr: *const i64) -> __m128i { 
ptr::read_unaligned(mem_addr as *const __m128i) @@ -30286,6 +32439,7 @@ pub unsafe fn _mm_loadu_epi64(mem_addr: *const i64) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_storeu_epi64&expand=5634) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64 pub unsafe fn _mm512_storeu_epi64(mem_addr: *mut i64, a: __m512i) { ptr::write_unaligned(mem_addr as *mut __m512i, a); @@ -30296,6 +32450,7 @@ pub unsafe fn _mm512_storeu_epi64(mem_addr: *mut i64, a: __m512i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_storeu_epi64&expand=5632) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64 pub unsafe fn _mm256_storeu_epi64(mem_addr: *mut i64, a: __m256i) { ptr::write_unaligned(mem_addr as *mut __m256i, a); @@ -30306,6 +32461,7 @@ pub unsafe fn _mm256_storeu_epi64(mem_addr: *mut i64, a: __m256i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_epi64&expand=5630) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64 pub unsafe fn _mm_storeu_epi64(mem_addr: *mut i64, a: __m128i) { ptr::write_unaligned(mem_addr as *mut __m128i, a); @@ -30316,6 +32472,7 @@ pub unsafe fn _mm_storeu_epi64(mem_addr: *mut i64, a: __m128i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_loadu_si512&expand=3420) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vmovups))] //should be vmovdqu32 pub unsafe fn _mm512_loadu_si512(mem_addr: *const i32) -> __m512i { ptr::read_unaligned(mem_addr as *const __m512i) @@ -30326,6 +32483,7 @@ pub unsafe fn _mm512_loadu_si512(mem_addr: *const i32) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_storeu_si512&expand=5657) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32 pub unsafe fn _mm512_storeu_si512(mem_addr: *mut i32, a: __m512i) { ptr::write_unaligned(mem_addr as *mut __m512i, a); @@ -30338,6 +32496,7 @@ pub unsafe fn _mm512_storeu_si512(mem_addr: *mut i32, a: __m512i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_loadu_pd) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] pub unsafe fn _mm512_loadu_pd(mem_addr: *const f64) -> __m512d { ptr::read_unaligned(mem_addr as *const __m512d) @@ -30350,6 +32509,7 @@ pub unsafe fn _mm512_loadu_pd(mem_addr: *const f64) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_storeu_pd) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] pub unsafe fn _mm512_storeu_pd(mem_addr: *mut f64, a: __m512d) { ptr::write_unaligned(mem_addr as *mut __m512d, a); @@ -30362,6 +32522,7 @@ pub unsafe fn _mm512_storeu_pd(mem_addr: *mut f64, a: __m512d) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_loadu_ps) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vmovups))] pub unsafe fn _mm512_loadu_ps(mem_addr: *const f32) -> __m512 { ptr::read_unaligned(mem_addr as *const __m512) @@ -30374,8 +32535,8 @@ pub unsafe fn _mm512_loadu_ps(mem_addr: *const f32) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_storeu_ps) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovups))] -#[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm512_storeu_ps(mem_addr: *mut f32, a: __m512) { ptr::write_unaligned(mem_addr as *mut __m512, a); } @@ -30385,6 +32546,7 @@ pub unsafe fn _mm512_storeu_ps(mem_addr: *mut f32, a: __m512) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_load_si512&expand=3345) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa32 pub unsafe fn _mm512_load_si512(mem_addr: *const i32) -> __m512i { ptr::read(mem_addr as *const __m512i) @@ -30395,6 +32557,7 @@ pub unsafe fn _mm512_load_si512(mem_addr: *const i32) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_store_si512&expand=5598) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa32 pub unsafe fn _mm512_store_si512(mem_addr: *mut i32, a: __m512i) { ptr::write(mem_addr as *mut __m512i, a); @@ -30405,6 +32568,7 @@ pub unsafe fn _mm512_store_si512(mem_addr: *mut i32, a: __m512i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_load_epi32&expand=3304) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa32 pub unsafe fn _mm512_load_epi32(mem_addr: *const i32) -> __m512i { ptr::read(mem_addr as *const __m512i) @@ -30415,6 +32579,7 @@ pub unsafe fn _mm512_load_epi32(mem_addr: *const i32) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_load_epi32&expand=3301) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa32 pub unsafe fn _mm256_load_epi32(mem_addr: *const i32) -> __m256i { ptr::read(mem_addr as *const __m256i) @@ -30425,6 +32590,7 @@ pub unsafe fn _mm256_load_epi32(mem_addr: *const i32) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_epi32&expand=3298) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa32 pub unsafe fn _mm_load_epi32(mem_addr: *const i32) -> __m128i { ptr::read(mem_addr as *const __m128i) @@ -30435,6 +32601,7 @@ pub unsafe fn _mm_load_epi32(mem_addr: *const i32) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_store_epi32&expand=5569) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa32 pub unsafe fn _mm512_store_epi32(mem_addr: *mut i32, a: __m512i) { ptr::write(mem_addr as *mut __m512i, a); @@ -30445,6 +32612,7 @@ pub unsafe fn _mm512_store_epi32(mem_addr: *mut i32, a: __m512i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_store_epi32&expand=5567) #[inline] #[target_feature(enable = 
"avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa32 pub unsafe fn _mm256_store_epi32(mem_addr: *mut i32, a: __m256i) { ptr::write(mem_addr as *mut __m256i, a); @@ -30455,6 +32623,7 @@ pub unsafe fn _mm256_store_epi32(mem_addr: *mut i32, a: __m256i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_epi32&expand=5565) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa32 pub unsafe fn _mm_store_epi32(mem_addr: *mut i32, a: __m128i) { ptr::write(mem_addr as *mut __m128i, a); @@ -30465,6 +32634,7 @@ pub unsafe fn _mm_store_epi32(mem_addr: *mut i32, a: __m128i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_load_epi64&expand=3313) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa64 pub unsafe fn _mm512_load_epi64(mem_addr: *const i64) -> __m512i { ptr::read(mem_addr as *const __m512i) @@ -30475,6 +32645,7 @@ pub unsafe fn _mm512_load_epi64(mem_addr: *const i64) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_load_epi64&expand=3310) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa64 pub unsafe fn _mm256_load_epi64(mem_addr: *const i64) -> __m256i { ptr::read(mem_addr as *const __m256i) @@ -30485,6 +32656,7 @@ pub unsafe fn _mm256_load_epi64(mem_addr: *const i64) -> __m256i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_epi64&expand=3307) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa64 pub unsafe fn _mm_load_epi64(mem_addr: *const i64) -> __m128i { ptr::read(mem_addr as *const __m128i) @@ -30495,6 +32667,7 @@ pub unsafe fn _mm_load_epi64(mem_addr: *const i64) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_store_epi64&expand=5575) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa64 pub unsafe fn _mm512_store_epi64(mem_addr: *mut i64, a: __m512i) { ptr::write(mem_addr as *mut __m512i, a); @@ -30505,6 +32678,7 @@ pub unsafe fn _mm512_store_epi64(mem_addr: *mut i64, a: __m512i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_store_epi64&expand=5573) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa64 pub unsafe fn _mm256_store_epi64(mem_addr: *mut i64, a: __m256i) { ptr::write(mem_addr as *mut __m256i, a); @@ -30515,6 +32689,7 @@ pub unsafe fn _mm256_store_epi64(mem_addr: *mut i64, a: __m256i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_epi64&expand=5571) #[inline] #[target_feature(enable = "avx512f,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa64 pub unsafe fn _mm_store_epi64(mem_addr: *mut i64, a: __m128i) { ptr::write(mem_addr as *mut __m128i, a); @@ -30525,6 +32700,7 @@ pub unsafe fn 
_mm_store_epi64(mem_addr: *mut i64, a: __m128i) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_load_ps&expand=3336) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] pub unsafe fn _mm512_load_ps(mem_addr: *const f32) -> __m512 { ptr::read(mem_addr as *const __m512) @@ -30535,6 +32711,7 @@ pub unsafe fn _mm512_load_ps(mem_addr: *const f32) -> __m512 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_store_ps&expand=5592) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] pub unsafe fn _mm512_store_ps(mem_addr: *mut f32, a: __m512) { ptr::write(mem_addr as *mut __m512, a); @@ -30545,6 +32722,7 @@ pub unsafe fn _mm512_store_ps(mem_addr: *mut f32, a: __m512) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_load_pd&expand=3326) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vmovapd pub unsafe fn _mm512_load_pd(mem_addr: *const f64) -> __m512d { ptr::read(mem_addr as *const __m512d) @@ -30555,6 +32733,7 @@ pub unsafe fn _mm512_load_pd(mem_addr: *const f64) -> __m512d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_store_pd&expand=5585) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovaps))] //should be vmovapd pub unsafe fn _mm512_store_pd(mem_addr: *mut f64, a: __m512d) { ptr::write(mem_addr as *mut __m512d, a); @@ -30567,6 +32746,7 @@ pub unsafe fn _mm512_store_pd(mem_addr: *mut f64, a: __m512d) { /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_loadu_epi32) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_loadu_epi32(src: __m512i, k: __mmask16, mem_addr: *const i32) -> __m512i { let mut dst: __m512i = src; asm!( @@ -30586,6 +32766,7 @@ pub unsafe fn _mm512_mask_loadu_epi32(src: __m512i, k: __mmask16, mem_addr: *con /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_loadu_epi32) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_loadu_epi32(k: __mmask16, mem_addr: *const i32) -> __m512i { let mut dst: __m512i; asm!( @@ -30605,6 +32786,7 @@ pub unsafe fn _mm512_maskz_loadu_epi32(k: __mmask16, mem_addr: *const i32) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_loadu_epi64) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_loadu_epi64(src: __m512i, k: __mmask8, mem_addr: *const i64) -> __m512i { let mut dst: __m512i = src; asm!( @@ -30624,6 +32806,7 @@ pub unsafe fn _mm512_mask_loadu_epi64(src: __m512i, k: __mmask8, mem_addr: *cons /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_loadu_epi64) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m512i { let mut dst: __m512i; asm!( @@ -30643,6 +32826,7 @@ pub unsafe fn _mm512_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_loadu_ps) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_loadu_ps(src: __m512, k: __mmask16, mem_addr: *const f32) -> __m512 { let mut dst: __m512 = src; asm!( @@ -30662,6 +32846,7 @@ pub unsafe fn _mm512_mask_loadu_ps(src: __m512, k: __mmask16, mem_addr: *const f /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_loadu_ps) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_loadu_ps(k: __mmask16, mem_addr: *const f32) -> __m512 { let mut dst: __m512; asm!( @@ -30681,6 +32866,7 @@ pub unsafe fn _mm512_maskz_loadu_ps(k: __mmask16, mem_addr: *const f32) -> __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_loadu_pd) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_loadu_pd(src: __m512d, k: __mmask8, mem_addr: *const f64) -> __m512d { let mut dst: __m512d = src; asm!( @@ -30700,6 +32886,7 @@ pub unsafe fn _mm512_mask_loadu_pd(src: __m512d, k: __mmask8, mem_addr: *const f /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_loadu_pd) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m512d { let mut dst: __m512d; asm!( @@ -30719,6 +32906,7 @@ pub unsafe fn _mm512_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_loadu_epi32) #[inline] 
#[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_loadu_epi32(src: __m256i, k: __mmask8, mem_addr: *const i32) -> __m256i { let mut dst: __m256i = src; asm!( @@ -30738,6 +32926,7 @@ pub unsafe fn _mm256_mask_loadu_epi32(src: __m256i, k: __mmask8, mem_addr: *cons /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_loadu_epi32) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_loadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m256i { let mut dst: __m256i; asm!( @@ -30757,6 +32946,7 @@ pub unsafe fn _mm256_maskz_loadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_loadu_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_loadu_epi64(src: __m256i, k: __mmask8, mem_addr: *const i64) -> __m256i { let mut dst: __m256i = src; asm!( @@ -30776,6 +32966,7 @@ pub unsafe fn _mm256_mask_loadu_epi64(src: __m256i, k: __mmask8, mem_addr: *cons /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_loadu_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m256i { let mut dst: __m256i; asm!( @@ -30795,6 +32986,7 @@ pub unsafe fn _mm256_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_loadu_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_loadu_ps(src: __m256, k: __mmask8, mem_addr: *const f32) -> __m256 { let mut dst: __m256 = src; asm!( @@ -30814,6 +33006,7 @@ pub unsafe fn _mm256_mask_loadu_ps(src: __m256, k: __mmask8, mem_addr: *const f3 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_loadu_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_loadu_ps(k: __mmask8, mem_addr: *const f32) -> __m256 { let mut dst: __m256; asm!( @@ -30833,6 +33026,7 @@ pub unsafe fn _mm256_maskz_loadu_ps(k: __mmask8, mem_addr: *const f32) -> __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_loadu_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_loadu_pd(src: __m256d, k: __mmask8, mem_addr: *const f64) -> __m256d { let mut dst: __m256d = src; asm!( @@ -30852,6 +33046,7 @@ pub unsafe fn _mm256_mask_loadu_pd(src: __m256d, k: __mmask8, mem_addr: *const f /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_loadu_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m256d { let mut dst: __m256d; asm!( @@ -30871,6 +33066,7 @@ pub unsafe fn _mm256_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_loadu_epi32) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn 
_mm_mask_loadu_epi32(src: __m128i, k: __mmask8, mem_addr: *const i32) -> __m128i { let mut dst: __m128i = src; asm!( @@ -30890,6 +33086,7 @@ pub unsafe fn _mm_mask_loadu_epi32(src: __m128i, k: __mmask8, mem_addr: *const i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_loadu_epi32) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_maskz_loadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m128i { let mut dst: __m128i; asm!( @@ -30909,6 +33106,7 @@ pub unsafe fn _mm_maskz_loadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_loadu_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_loadu_epi64(src: __m128i, k: __mmask8, mem_addr: *const i64) -> __m128i { let mut dst: __m128i = src; asm!( @@ -30928,6 +33126,7 @@ pub unsafe fn _mm_mask_loadu_epi64(src: __m128i, k: __mmask8, mem_addr: *const i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_loadu_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m128i { let mut dst: __m128i; asm!( @@ -30947,6 +33146,7 @@ pub unsafe fn _mm_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_loadu_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_loadu_ps(src: __m128, k: __mmask8, mem_addr: *const f32) -> 
__m128 { let mut dst: __m128 = src; asm!( @@ -30966,6 +33166,7 @@ pub unsafe fn _mm_mask_loadu_ps(src: __m128, k: __mmask8, mem_addr: *const f32) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_loadu_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_maskz_loadu_ps(k: __mmask8, mem_addr: *const f32) -> __m128 { let mut dst: __m128; asm!( @@ -30985,6 +33186,7 @@ pub unsafe fn _mm_maskz_loadu_ps(k: __mmask8, mem_addr: *const f32) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_loadu_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_loadu_pd(src: __m128d, k: __mmask8, mem_addr: *const f64) -> __m128d { let mut dst: __m128d = src; asm!( @@ -31004,6 +33206,7 @@ pub unsafe fn _mm_mask_loadu_pd(src: __m128d, k: __mmask8, mem_addr: *const f64) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_loadu_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m128d { let mut dst: __m128d; asm!( @@ -31023,6 +33226,7 @@ pub unsafe fn _mm_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_load_epi32) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_load_epi32(src: __m512i, k: __mmask16, mem_addr: *const i32) -> __m512i { let mut dst: __m512i = src; asm!( @@ -31042,6 +33246,7 @@ pub unsafe fn 
_mm512_mask_load_epi32(src: __m512i, k: __mmask16, mem_addr: *cons /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_load_epi32) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_load_epi32(k: __mmask16, mem_addr: *const i32) -> __m512i { let mut dst: __m512i; asm!( @@ -31061,6 +33266,7 @@ pub unsafe fn _mm512_maskz_load_epi32(k: __mmask16, mem_addr: *const i32) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_load_epi64) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_load_epi64(src: __m512i, k: __mmask8, mem_addr: *const i64) -> __m512i { let mut dst: __m512i = src; asm!( @@ -31080,6 +33286,7 @@ pub unsafe fn _mm512_mask_load_epi64(src: __m512i, k: __mmask8, mem_addr: *const /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_load_epi64) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m512i { let mut dst: __m512i; asm!( @@ -31099,6 +33306,7 @@ pub unsafe fn _mm512_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_load_ps) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_load_ps(src: __m512, k: __mmask16, mem_addr: *const f32) -> __m512 { let mut dst: __m512 = src; asm!( @@ -31118,6 +33326,7 @@ pub unsafe fn _mm512_mask_load_ps(src: __m512, k: __mmask16, mem_addr: *const f3 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_load_ps) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_load_ps(k: __mmask16, mem_addr: *const f32) -> __m512 { let mut dst: __m512; asm!( @@ -31137,6 +33346,7 @@ pub unsafe fn _mm512_maskz_load_ps(k: __mmask16, mem_addr: *const f32) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_load_pd) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_load_pd(src: __m512d, k: __mmask8, mem_addr: *const f64) -> __m512d { let mut dst: __m512d = src; asm!( @@ -31156,6 +33366,7 @@ pub unsafe fn _mm512_mask_load_pd(src: __m512d, k: __mmask8, mem_addr: *const f6 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_load_pd) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m512d { let mut dst: __m512d; asm!( @@ -31175,6 +33386,7 @@ pub unsafe fn _mm512_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m512d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_load_epi32) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_load_epi32(src: __m256i, k: __mmask8, mem_addr: *const i32) -> __m256i { let mut dst: __m256i = src; asm!( @@ -31194,6 +33406,7 @@ pub unsafe fn _mm256_mask_load_epi32(src: __m256i, k: __mmask8, mem_addr: *const /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_load_epi32) 
#[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_load_epi32(k: __mmask8, mem_addr: *const i32) -> __m256i { let mut dst: __m256i; asm!( @@ -31213,6 +33426,7 @@ pub unsafe fn _mm256_maskz_load_epi32(k: __mmask8, mem_addr: *const i32) -> __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_load_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_load_epi64(src: __m256i, k: __mmask8, mem_addr: *const i64) -> __m256i { let mut dst: __m256i = src; asm!( @@ -31232,6 +33446,7 @@ pub unsafe fn _mm256_mask_load_epi64(src: __m256i, k: __mmask8, mem_addr: *const /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_load_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m256i { let mut dst: __m256i; asm!( @@ -31251,6 +33466,7 @@ pub unsafe fn _mm256_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_load_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_load_ps(src: __m256, k: __mmask8, mem_addr: *const f32) -> __m256 { let mut dst: __m256 = src; asm!( @@ -31270,6 +33486,7 @@ pub unsafe fn _mm256_mask_load_ps(src: __m256, k: __mmask8, mem_addr: *const f32 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_load_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_load_ps(k: __mmask8, mem_addr: *const f32) -> __m256 { let mut dst: __m256; asm!( @@ -31289,6 +33506,7 @@ pub unsafe fn _mm256_maskz_load_ps(k: __mmask8, mem_addr: *const f32) -> __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_load_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_load_pd(src: __m256d, k: __mmask8, mem_addr: *const f64) -> __m256d { let mut dst: __m256d = src; asm!( @@ -31308,6 +33526,7 @@ pub unsafe fn _mm256_mask_load_pd(src: __m256d, k: __mmask8, mem_addr: *const f6 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_load_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m256d { let mut dst: __m256d; asm!( @@ -31327,6 +33546,7 @@ pub unsafe fn _mm256_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m256d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_load_epi32) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_load_epi32(src: __m128i, k: __mmask8, mem_addr: *const i32) -> __m128i { let mut dst: __m128i = src; asm!( @@ -31346,6 +33566,7 @@ pub unsafe fn _mm_mask_load_epi32(src: __m128i, k: __mmask8, mem_addr: *const i3 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_load_epi32) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn 
_mm_maskz_load_epi32(k: __mmask8, mem_addr: *const i32) -> __m128i { let mut dst: __m128i; asm!( @@ -31365,6 +33586,7 @@ pub unsafe fn _mm_maskz_load_epi32(k: __mmask8, mem_addr: *const i32) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_load_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_load_epi64(src: __m128i, k: __mmask8, mem_addr: *const i64) -> __m128i { let mut dst: __m128i = src; asm!( @@ -31384,6 +33606,7 @@ pub unsafe fn _mm_mask_load_epi64(src: __m128i, k: __mmask8, mem_addr: *const i6 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_load_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m128i { let mut dst: __m128i; asm!( @@ -31403,6 +33626,7 @@ pub unsafe fn _mm_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_load_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_load_ps(src: __m128, k: __mmask8, mem_addr: *const f32) -> __m128 { let mut dst: __m128 = src; asm!( @@ -31422,6 +33646,7 @@ pub unsafe fn _mm_mask_load_ps(src: __m128, k: __mmask8, mem_addr: *const f32) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_load_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_maskz_load_ps(k: __mmask8, mem_addr: *const f32) -> __m128 { let mut dst: __m128; 
asm!( @@ -31441,6 +33666,7 @@ pub unsafe fn _mm_maskz_load_ps(k: __mmask8, mem_addr: *const f32) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_load_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_load_pd(src: __m128d, k: __mmask8, mem_addr: *const f64) -> __m128d { let mut dst: __m128d = src; asm!( @@ -31460,6 +33686,7 @@ pub unsafe fn _mm_mask_load_pd(src: __m128d, k: __mmask8, mem_addr: *const f64) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_load_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m128d { let mut dst: __m128d; asm!( @@ -31478,6 +33705,7 @@ pub unsafe fn _mm_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_storeu_epi32) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask16, a: __m512i) { asm!( vps!("vmovdqu32", "{{{mask}}}, {a}"), @@ -31494,6 +33722,7 @@ pub unsafe fn _mm512_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask16, a: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_storeu_epi64) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m512i) { asm!( vps!("vmovdqu64", "{{{mask}}}, {a}"), @@ -31510,6 +33739,7 @@ pub unsafe fn _mm512_mask_storeu_epi64(mem_addr: *mut i64, 
mask: __mmask8, a: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_storeu_ps) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask16, a: __m512) { asm!( vps!("vmovups", "{{{mask}}}, {a}"), @@ -31526,6 +33756,7 @@ pub unsafe fn _mm512_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask16, a: __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_storeu_pd) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m512d) { asm!( vps!("vmovupd", "{{{mask}}}, {a}"), @@ -31542,6 +33773,7 @@ pub unsafe fn _mm512_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_storeu_epi32) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m256i) { asm!( vps!("vmovdqu32", "{{{mask}}}, {a}"), @@ -31558,6 +33790,7 @@ pub unsafe fn _mm256_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask8, a: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_storeu_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m256i) { asm!( vps!("vmovdqu64", "{{{mask}}}, {a}"), @@ -31574,6 +33807,7 @@ pub unsafe fn _mm256_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_storeu_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask8, a: __m256) { asm!( vps!("vmovups", "{{{mask}}}, {a}"), @@ -31590,6 +33824,7 @@ pub unsafe fn _mm256_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask8, a: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_storeu_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m256d) { asm!( vps!("vmovupd", "{{{mask}}}, {a}"), @@ -31606,6 +33841,7 @@ pub unsafe fn _mm256_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_storeu_epi32) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m128i) { asm!( vps!("vmovdqu32", "{{{mask}}}, {a}"), @@ -31622,6 +33858,7 @@ pub unsafe fn _mm_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_storeu_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m128i) { asm!( vps!("vmovdqu64", "{{{mask}}}, {a}"), @@ -31638,6 +33875,7 @@ pub unsafe fn _mm_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m12 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_storeu_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask8, a: __m128) { asm!( vps!("vmovups", "{{{mask}}}, {a}"), @@ -31654,6 +33892,7 @@ pub unsafe fn _mm_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask8, a: __m128) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_storeu_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m128d) { asm!( vps!("vmovupd", "{{{mask}}}, {a}"), @@ -31670,6 +33909,7 @@ pub unsafe fn _mm_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_store_epi32) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_store_epi32(mem_addr: *mut i32, mask: __mmask16, a: __m512i) { asm!( vps!("vmovdqa32", "{{{mask}}}, {a}"), @@ -31686,6 +33926,7 @@ pub unsafe fn _mm512_mask_store_epi32(mem_addr: *mut i32, mask: __mmask16, a: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_store_epi64) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m512i) { asm!( vps!("vmovdqa64", "{{{mask}}}, {a}"), @@ -31702,6 +33943,7 @@ pub unsafe fn _mm512_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_store_ps) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_store_ps(mem_addr: *mut f32, mask: __mmask16, a: __m512) { asm!( vps!("vmovaps", "{{{mask}}}, {a}"), @@ -31718,6 +33960,7 @@ pub unsafe fn _mm512_mask_store_ps(mem_addr: *mut f32, mask: __mmask16, a: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_store_pd) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m512d) { asm!( vps!("vmovapd", "{{{mask}}}, {a}"), @@ -31734,6 +33977,7 @@ pub unsafe fn _mm512_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_store_epi32) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_store_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m256i) { asm!( vps!("vmovdqa32", "{{{mask}}}, {a}"), @@ -31750,6 +33994,7 @@ pub unsafe fn _mm256_mask_store_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_store_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m256i) { asm!( vps!("vmovdqa64", "{{{mask}}}, {a}"), @@ -31766,6 +34011,7 @@ pub unsafe fn _mm256_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_store_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_store_ps(mem_addr: *mut f32, mask: __mmask8, a: __m256) { asm!( vps!("vmovaps", "{{{mask}}}, {a}"), @@ -31782,6 +34028,7 @@ pub unsafe fn _mm256_mask_store_ps(mem_addr: *mut f32, mask: __mmask8, a: __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_store_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m256d) { asm!( vps!("vmovapd", "{{{mask}}}, {a}"), @@ -31798,6 +34045,7 @@ pub unsafe fn _mm256_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_store_epi32) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_store_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m128i) { asm!( vps!("vmovdqa32", "{{{mask}}}, {a}"), @@ -31814,6 +34062,7 @@ pub unsafe fn _mm_mask_store_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_store_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m128i) { asm!( vps!("vmovdqa64", "{{{mask}}}, {a}"), @@ -31830,6 +34079,7 @@ pub unsafe fn _mm_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m128 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_store_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_store_ps(mem_addr: *mut f32, mask: __mmask8, a: __m128) { asm!( vps!("vmovaps", "{{{mask}}}, {a}"), @@ -31846,6 +34096,7 @@ pub unsafe fn _mm_mask_store_ps(mem_addr: *mut f32, mask: __mmask8, a: __m128) { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_store_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m128d) { asm!( vps!("vmovapd", "{{{mask}}}, {a}"), @@ -31861,6 +34112,7 @@ pub unsafe fn _mm_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expandloadu_epi32) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_expandloadu_epi32( src: __m512i, k: __mmask16, @@ -31882,6 +34134,7 @@ pub unsafe fn _mm512_mask_expandloadu_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expandloadu_epi32) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_expandloadu_epi32(k: __mmask16, mem_addr: *const i32) -> __m512i { let mut dst: __m512i; asm!( @@ -31899,6 +34152,7 @@ pub unsafe fn _mm512_maskz_expandloadu_epi32(k: __mmask16, mem_addr: *const i32) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expandloadu_epi32) #[inline] #[target_feature(enable = 
"avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_expandloadu_epi32( src: __m256i, k: __mmask8, @@ -31920,6 +34174,7 @@ pub unsafe fn _mm256_mask_expandloadu_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expandloadu_epi32) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_expandloadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m256i { let mut dst: __m256i; asm!( @@ -31937,6 +34192,7 @@ pub unsafe fn _mm256_maskz_expandloadu_epi32(k: __mmask8, mem_addr: *const i32) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expandloadu_epi32) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_expandloadu_epi32( src: __m128i, k: __mmask8, @@ -31958,6 +34214,7 @@ pub unsafe fn _mm_mask_expandloadu_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expandloadu_epi32) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_maskz_expandloadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m128i { let mut dst: __m128i; asm!( @@ -31975,6 +34232,7 @@ pub unsafe fn _mm_maskz_expandloadu_epi32(k: __mmask8, mem_addr: *const i32) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expandloadu_epi64) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_expandloadu_epi64( src: __m512i, k: __mmask8, @@ -31996,6 +34254,7 @@ pub unsafe fn _mm512_mask_expandloadu_epi64( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expandloadu_epi64) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_expandloadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m512i { let mut dst: __m512i; asm!( @@ -32013,6 +34272,7 @@ pub unsafe fn _mm512_maskz_expandloadu_epi64(k: __mmask8, mem_addr: *const i64) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expandloadu_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_expandloadu_epi64( src: __m256i, k: __mmask8, @@ -32034,6 +34294,7 @@ pub unsafe fn _mm256_mask_expandloadu_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expandloadu_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_expandloadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m256i { let mut dst: __m256i; asm!( @@ -32051,6 +34312,7 @@ pub unsafe fn _mm256_maskz_expandloadu_epi64(k: __mmask8, mem_addr: *const i64) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expandloadu_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_expandloadu_epi64( src: __m128i, k: __mmask8, @@ -32072,6 +34334,7 @@ pub unsafe fn _mm_mask_expandloadu_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expandloadu_epi64) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = 
"111137")] pub unsafe fn _mm_maskz_expandloadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m128i { let mut dst: __m128i; asm!( @@ -32089,6 +34352,7 @@ pub unsafe fn _mm_maskz_expandloadu_epi64(k: __mmask8, mem_addr: *const i64) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expandloadu_ps) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_expandloadu_ps( src: __m512, k: __mmask16, @@ -32110,6 +34374,7 @@ pub unsafe fn _mm512_mask_expandloadu_ps( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expandloadu_ps) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_expandloadu_ps(k: __mmask16, mem_addr: *const f32) -> __m512 { let mut dst: __m512; asm!( @@ -32127,6 +34392,7 @@ pub unsafe fn _mm512_maskz_expandloadu_ps(k: __mmask16, mem_addr: *const f32) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expandloadu_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_expandloadu_ps(src: __m256, k: __mmask8, mem_addr: *const f32) -> __m256 { let mut dst: __m256 = src; asm!( @@ -32144,6 +34410,7 @@ pub unsafe fn _mm256_mask_expandloadu_ps(src: __m256, k: __mmask8, mem_addr: *co /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expandloadu_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_expandloadu_ps(k: __mmask8, mem_addr: *const f32) -> __m256 { let mut dst: __m256; asm!( @@ -32161,6 +34428,7 @@ pub unsafe fn 
_mm256_maskz_expandloadu_ps(k: __mmask8, mem_addr: *const f32) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expandloadu_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_expandloadu_ps(src: __m128, k: __mmask8, mem_addr: *const f32) -> __m128 { let mut dst: __m128 = src; asm!( @@ -32178,6 +34446,7 @@ pub unsafe fn _mm_mask_expandloadu_ps(src: __m128, k: __mmask8, mem_addr: *const /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expandloadu_ps) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_maskz_expandloadu_ps(k: __mmask8, mem_addr: *const f32) -> __m128 { let mut dst: __m128; asm!( @@ -32195,6 +34464,7 @@ pub unsafe fn _mm_maskz_expandloadu_ps(k: __mmask8, mem_addr: *const f32) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expandloadu_pd) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_expandloadu_pd( src: __m512d, k: __mmask8, @@ -32216,6 +34486,7 @@ pub unsafe fn _mm512_mask_expandloadu_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expandloadu_pd) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_expandloadu_pd(k: __mmask8, mem_addr: *const f64) -> __m512d { let mut dst: __m512d; asm!( @@ -32233,6 +34504,7 @@ pub unsafe fn _mm512_maskz_expandloadu_pd(k: __mmask8, mem_addr: *const f64) -> /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expandloadu_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_expandloadu_pd( src: __m256d, k: __mmask8, @@ -32254,6 +34526,7 @@ pub unsafe fn _mm256_mask_expandloadu_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expandloadu_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_expandloadu_pd(k: __mmask8, mem_addr: *const f64) -> __m256d { let mut dst: __m256d; asm!( @@ -32271,6 +34544,7 @@ pub unsafe fn _mm256_maskz_expandloadu_pd(k: __mmask8, mem_addr: *const f64) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expandloadu_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_expandloadu_pd(src: __m128d, k: __mmask8, mem_addr: *const f64) -> __m128d { let mut dst: __m128d = src; asm!( @@ -32288,6 +34562,7 @@ pub unsafe fn _mm_mask_expandloadu_pd(src: __m128d, k: __mmask8, mem_addr: *cons /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expandloadu_pd) #[inline] #[target_feature(enable = "avx512f,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_maskz_expandloadu_pd(k: __mmask8, mem_addr: *const f64) -> __m128d { let mut dst: __m128d; asm!( @@ -32305,6 +34580,7 @@ pub unsafe fn _mm_maskz_expandloadu_pd(k: __mmask8, mem_addr: *const f64) -> __m /// [Intel's documentation]( https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setr_pd&expand=5002) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_setr_pd( e0: f64, e1: f64, @@ -32324,6 +34600,7 @@ pub unsafe fn _mm512_setr_pd( /// [Intel's documentation]( https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set_pd&expand=4924) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_set_pd( e0: f64, e1: f64, @@ -32342,6 +34619,7 @@ pub unsafe fn _mm512_set_pd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_move_ss&expand=3832) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovss))] pub unsafe fn _mm_mask_move_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { let extractsrc: f32 = simd_extract(src, 0); @@ -32357,6 +34635,7 @@ pub unsafe fn _mm_mask_move_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_move_ss&expand=3833) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovss))] pub unsafe fn _mm_maskz_move_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { let mut mov: f32 = 0.; @@ -32371,6 +34650,7 @@ pub unsafe fn _mm_maskz_move_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_move_sd&expand=3829) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovsd))] pub unsafe fn _mm_mask_move_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let extractsrc: f64 = simd_extract(src, 0); @@ 
-32386,6 +34666,7 @@ pub unsafe fn _mm_mask_move_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_move_sd&expand=3830) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmovsd))] pub unsafe fn _mm_maskz_move_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let mut mov: f64 = 0.; @@ -32400,6 +34681,7 @@ pub unsafe fn _mm_maskz_move_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_add_ss&expand=159) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddss))] pub unsafe fn _mm_mask_add_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { let extractsrc: f32 = simd_extract(src, 0); @@ -32417,6 +34699,7 @@ pub unsafe fn _mm_mask_add_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_add_ss&expand=160) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddss))] pub unsafe fn _mm_maskz_add_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { let mut add: f32 = 0.; @@ -32433,6 +34716,7 @@ pub unsafe fn _mm_maskz_add_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_add_sd&expand=155) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddsd))] pub unsafe fn _mm_mask_add_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let 
extractsrc: f64 = simd_extract(src, 0); @@ -32450,6 +34734,7 @@ pub unsafe fn _mm_mask_add_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_add_sd&expand=156) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddsd))] pub unsafe fn _mm_maskz_add_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let mut add: f64 = 0.; @@ -32466,6 +34751,7 @@ pub unsafe fn _mm_maskz_add_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_sub_ss&expand=5750) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubss))] pub unsafe fn _mm_mask_sub_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { let extractsrc: f32 = simd_extract(src, 0); @@ -32483,6 +34769,7 @@ pub unsafe fn _mm_mask_sub_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_sub_ss&expand=5751) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubss))] pub unsafe fn _mm_maskz_sub_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { let mut add: f32 = 0.; @@ -32499,6 +34786,7 @@ pub unsafe fn _mm_maskz_sub_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_sub_sd&expand=5746) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubsd))] pub unsafe fn _mm_mask_sub_sd(src: __m128d, k: __mmask8, a: 
__m128d, b: __m128d) -> __m128d { let extractsrc: f64 = simd_extract(src, 0); @@ -32516,6 +34804,7 @@ pub unsafe fn _mm_mask_sub_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_sub_sd&expand=5747) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubsd))] pub unsafe fn _mm_maskz_sub_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let mut add: f64 = 0.; @@ -32532,6 +34821,7 @@ pub unsafe fn _mm_maskz_sub_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_mul_ss&expand=3950) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulss))] pub unsafe fn _mm_mask_mul_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { let extractsrc: f32 = simd_extract(src, 0); @@ -32549,6 +34839,7 @@ pub unsafe fn _mm_mask_mul_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_mul_ss&expand=3951) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulss))] pub unsafe fn _mm_maskz_mul_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { let mut add: f32 = 0.; @@ -32565,6 +34856,7 @@ pub unsafe fn _mm_maskz_mul_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_mul_sd&expand=3947) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulsd))] pub unsafe fn 
_mm_mask_mul_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let extractsrc: f64 = simd_extract(src, 0); @@ -32582,6 +34874,7 @@ pub unsafe fn _mm_mask_mul_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_mul_sd&expand=3948) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulsd))] pub unsafe fn _mm_maskz_mul_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let mut add: f64 = 0.; @@ -32598,6 +34891,7 @@ pub unsafe fn _mm_maskz_mul_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_div_ss&expand=2181) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivss))] pub unsafe fn _mm_mask_div_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { let extractsrc: f32 = simd_extract(src, 0); @@ -32615,6 +34909,7 @@ pub unsafe fn _mm_mask_div_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_div_ss&expand=2182) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivss))] pub unsafe fn _mm_maskz_div_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { let mut add: f32 = 0.; @@ -32631,6 +34926,7 @@ pub unsafe fn _mm_maskz_div_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_div_sd&expand=2178) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vdivsd))] pub unsafe fn _mm_mask_div_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let extractsrc: f64 = simd_extract(src, 0); @@ -32648,6 +34944,7 @@ pub unsafe fn _mm_mask_div_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_div_sd&expand=2179) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivsd))] pub unsafe fn _mm_maskz_div_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { let mut add: f64 = 0.; @@ -32664,6 +34961,7 @@ pub unsafe fn _mm_maskz_div_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_max_ss&expand=3672) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxss))] pub unsafe fn _mm_mask_max_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { transmute(vmaxss( @@ -32680,6 +34978,7 @@ pub unsafe fn _mm_mask_max_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_max_ss&expand=3673) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxss))] pub unsafe fn _mm_maskz_max_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { transmute(vmaxss( @@ -32696,6 +34995,7 @@ pub unsafe fn _mm_maskz_max_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_max_sd&expand=3669) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vmaxsd))] pub unsafe fn _mm_mask_max_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { transmute(vmaxsd( @@ -32712,6 +35012,7 @@ pub unsafe fn _mm_mask_max_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_max_sd&expand=3670) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxsd))] pub unsafe fn _mm_maskz_max_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { transmute(vmaxsd( @@ -32728,6 +35029,7 @@ pub unsafe fn _mm_maskz_max_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_min_ss&expand=3786) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminss))] pub unsafe fn _mm_mask_min_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { transmute(vminss( @@ -32744,6 +35046,7 @@ pub unsafe fn _mm_mask_min_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_min_ss&expand=3787) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminss))] pub unsafe fn _mm_maskz_min_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { transmute(vminss( @@ -32760,6 +35063,7 @@ pub unsafe fn _mm_maskz_min_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_min_sd&expand=3783) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vminsd))] pub unsafe fn _mm_mask_min_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { transmute(vminsd( @@ -32776,6 +35080,7 @@ pub unsafe fn _mm_mask_min_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_min_sd&expand=3784) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminsd))] pub unsafe fn _mm_maskz_min_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { transmute(vminsd( @@ -32792,6 +35097,7 @@ pub unsafe fn _mm_maskz_min_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_sqrt_ss&expand=5387) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtss))] pub unsafe fn _mm_mask_sqrt_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { transmute(vsqrtss( @@ -32808,6 +35114,7 @@ pub unsafe fn _mm_mask_sqrt_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_sqrt_ss&expand=5388) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtss))] pub unsafe fn _mm_maskz_sqrt_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { transmute(vsqrtss( @@ -32824,6 +35131,7 @@ pub unsafe fn _mm_maskz_sqrt_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_sqrt_sd&expand=5384) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vsqrtsd))] pub unsafe fn _mm_mask_sqrt_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { transmute(vsqrtsd( @@ -32840,6 +35148,7 @@ pub unsafe fn _mm_mask_sqrt_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_sqrt_sd&expand=5385) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtsd))] pub unsafe fn _mm_maskz_sqrt_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { transmute(vsqrtsd( @@ -32856,6 +35165,7 @@ pub unsafe fn _mm_maskz_sqrt_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_rsqrt14_ss&expand=4825) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14ss))] pub unsafe fn _mm_rsqrt14_ss(a: __m128, b: __m128) -> __m128 { transmute(vrsqrt14ss( @@ -32871,6 +35181,7 @@ pub unsafe fn _mm_rsqrt14_ss(a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_rsqrt14_ss&expand=4823) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14ss))] pub unsafe fn _mm_mask_rsqrt14_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { transmute(vrsqrt14ss(a.as_f32x4(), b.as_f32x4(), src.as_f32x4(), k)) @@ -32881,6 +35192,7 @@ pub unsafe fn _mm_mask_rsqrt14_ss(src: __m128, k: __mmask8, a: __m128, b: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_rsqrt14_ss&expand=4824) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = 
"111137")] #[cfg_attr(test, assert_instr(vrsqrt14ss))] pub unsafe fn _mm_maskz_rsqrt14_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { transmute(vrsqrt14ss( @@ -32896,6 +35208,7 @@ pub unsafe fn _mm_maskz_rsqrt14_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_rsqrt14_sd&expand=4822) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14sd))] pub unsafe fn _mm_rsqrt14_sd(a: __m128d, b: __m128d) -> __m128d { transmute(vrsqrt14sd( @@ -32911,6 +35224,7 @@ pub unsafe fn _mm_rsqrt14_sd(a: __m128d, b: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_rsqrt14_sd&expand=4820) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14sd))] pub unsafe fn _mm_mask_rsqrt14_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { transmute(vrsqrt14sd(a.as_f64x2(), b.as_f64x2(), src.as_f64x2(), k)) @@ -32921,6 +35235,7 @@ pub unsafe fn _mm_mask_rsqrt14_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_rsqrt14_sd&expand=4821) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrsqrt14sd))] pub unsafe fn _mm_maskz_rsqrt14_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { transmute(vrsqrt14sd( @@ -32936,6 +35251,7 @@ pub unsafe fn _mm_maskz_rsqrt14_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_rcp14_ss&expand=4508) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature 
= "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14ss))] pub unsafe fn _mm_rcp14_ss(a: __m128, b: __m128) -> __m128 { transmute(vrcp14ss( @@ -32951,6 +35267,7 @@ pub unsafe fn _mm_rcp14_ss(a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_rcp14_ss&expand=4506) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14ss))] pub unsafe fn _mm_mask_rcp14_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { transmute(vrcp14ss(a.as_f32x4(), b.as_f32x4(), src.as_f32x4(), k)) @@ -32961,6 +35278,7 @@ pub unsafe fn _mm_mask_rcp14_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_rcp14_ss&expand=4507) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14ss))] pub unsafe fn _mm_maskz_rcp14_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { transmute(vrcp14ss( @@ -32976,6 +35294,7 @@ pub unsafe fn _mm_maskz_rcp14_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_rcp14_sd&expand=4505) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14sd))] pub unsafe fn _mm_rcp14_sd(a: __m128d, b: __m128d) -> __m128d { transmute(vrcp14sd( @@ -32991,6 +35310,7 @@ pub unsafe fn _mm_rcp14_sd(a: __m128d, b: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_rcp14_sd&expand=4503) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vrcp14sd))] pub unsafe fn _mm_mask_rcp14_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { transmute(vrcp14sd(a.as_f64x2(), b.as_f64x2(), src.as_f64x2(), k)) @@ -33001,6 +35321,7 @@ pub unsafe fn _mm_mask_rcp14_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_rcp14_sd&expand=4504) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrcp14sd))] pub unsafe fn _mm_maskz_rcp14_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { transmute(vrcp14sd( @@ -33016,6 +35337,7 @@ pub unsafe fn _mm_maskz_rcp14_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_getexp_ss&expand=2862) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpss))] pub unsafe fn _mm_getexp_ss(a: __m128, b: __m128) -> __m128 { transmute(vgetexpss( @@ -33032,6 +35354,7 @@ pub unsafe fn _mm_getexp_ss(a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_getexp_ss&expand=2863) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpss))] pub unsafe fn _mm_mask_getexp_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -> __m128 { transmute(vgetexpss( @@ -33048,6 +35371,7 @@ pub unsafe fn _mm_mask_getexp_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_getexp_ss&expand=2864) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpss))] pub unsafe fn _mm_maskz_getexp_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { transmute(vgetexpss( @@ -33064,6 +35388,7 @@ pub unsafe fn _mm_maskz_getexp_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_getexp_sd&expand=2859) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpsd))] pub unsafe fn _mm_getexp_sd(a: __m128d, b: __m128d) -> __m128d { transmute(vgetexpsd( @@ -33080,6 +35405,7 @@ pub unsafe fn _mm_getexp_sd(a: __m128d, b: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_getexp_sd&expand=2860) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpsd))] pub unsafe fn _mm_mask_getexp_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { transmute(vgetexpsd( @@ -33096,6 +35422,7 @@ pub unsafe fn _mm_mask_getexp_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_getexp_sd&expand=2861) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpsd))] pub unsafe fn _mm_maskz_getexp_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d { transmute(vgetexpsd( @@ -33122,6 +35449,7 @@ pub unsafe fn _mm_maskz_getexp_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_getmant_ss&expand=2898) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue 
= "111137")] #[cfg_attr(test, assert_instr(vgetmantss, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm_getmant_ss< @@ -33155,6 +35483,7 @@ pub unsafe fn _mm_getmant_ss< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_getmant_ss&expand=2899) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantss, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(4, 5)] pub unsafe fn _mm_mask_getmant_ss< @@ -33190,6 +35519,7 @@ pub unsafe fn _mm_mask_getmant_ss< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_getmant_ss&expand=2900) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantss, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm_maskz_getmant_ss< @@ -33224,6 +35554,7 @@ pub unsafe fn _mm_maskz_getmant_ss< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_getmant_sd&expand=2895) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantsd, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm_getmant_sd< @@ -33257,6 +35588,7 @@ pub unsafe fn _mm_getmant_sd< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_getmant_sd&expand=2896) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantsd, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(4, 5)] pub unsafe fn _mm_mask_getmant_sd< @@ -33292,6 +35624,7 @@ pub unsafe fn _mm_mask_getmant_sd< /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_getmant_sd&expand=2897) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantsd, NORM = 0, SIGN = 0))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm_maskz_getmant_sd< @@ -33322,6 +35655,7 @@ pub unsafe fn _mm_maskz_getmant_sd< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_roundscale_ss&expand=4802) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 255))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_roundscale_ss(a: __m128, b: __m128) -> __m128 { @@ -33344,6 +35678,7 @@ pub unsafe fn _mm_roundscale_ss(a: __m128, b: __m128) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_roundscale_ss&expand=4800) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_roundscale_ss( @@ -33371,6 +35706,7 @@ pub unsafe fn _mm_mask_roundscale_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_roundscale_ss&expand=4801) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_roundscale_ss( @@ -33397,6 +35733,7 @@ pub unsafe fn _mm_maskz_roundscale_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_roundscale_sd&expand=4799) #[inline] #[target_feature(enable = "avx512f")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 255))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_roundscale_sd(a: __m128d, b: __m128d) -> __m128d { @@ -33419,6 +35756,7 @@ pub unsafe fn _mm_roundscale_sd(a: __m128d, b: __m128d) -> __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_roundscale_sd&expand=4797) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_roundscale_sd( @@ -33446,6 +35784,7 @@ pub unsafe fn _mm_mask_roundscale_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_roundscale_sd&expand=4798) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_roundscale_sd( @@ -33466,6 +35805,7 @@ pub unsafe fn _mm_maskz_roundscale_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_scalef_ss&expand=4901) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefss))] pub unsafe fn _mm_scalef_ss(a: __m128, b: __m128) -> __m128 { let a = a.as_f32x4(); @@ -33479,6 +35819,7 @@ pub unsafe fn _mm_scalef_ss(a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_scalef_ss&expand=4899) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefss))] pub unsafe fn _mm_mask_scalef_ss(src: __m128, k: 
__mmask8, a: __m128, b: __m128) -> __m128 { let a = a.as_f32x4(); @@ -33492,6 +35833,7 @@ pub unsafe fn _mm_mask_scalef_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_scalef_ss&expand=4900) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefss))] pub unsafe fn _mm_maskz_scalef_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { transmute(vscalefss( @@ -33508,6 +35850,7 @@ pub unsafe fn _mm_maskz_scalef_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_scalef_sd&expand=4898) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefsd))] pub unsafe fn _mm_scalef_sd(a: __m128d, b: __m128d) -> __m128d { transmute(vscalefsd( @@ -33524,6 +35867,7 @@ pub unsafe fn _mm_scalef_sd(a: __m128d, b: __m128d) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_scalef_sd&expand=4896) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefsd))] pub unsafe fn _mm_mask_scalef_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d) -> __m128d { transmute(vscalefsd( @@ -33540,6 +35884,7 @@ pub unsafe fn _mm_mask_scalef_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_scalef_sd&expand=4897) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefsd))] pub unsafe fn _mm_maskz_scalef_sd(k: __mmask8, a: __m128d, b: 
__m128d) -> __m128d { transmute(vscalefsd( @@ -33556,6 +35901,7 @@ pub unsafe fn _mm_maskz_scalef_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fmadd_ss&expand=2582) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd213ss))] pub unsafe fn _mm_mask_fmadd_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { let mut fmadd: f32 = simd_extract(a, 0); @@ -33572,6 +35918,7 @@ pub unsafe fn _mm_mask_fmadd_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fmadd_ss&expand=2584) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd213ss))] pub unsafe fn _mm_maskz_fmadd_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { let mut fmadd: f32 = 0.; @@ -33589,6 +35936,7 @@ pub unsafe fn _mm_maskz_fmadd_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fmadd_ss&expand=2583) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd213ss))] pub unsafe fn _mm_mask3_fmadd_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { let mut fmadd: f32 = simd_extract(c, 0); @@ -33605,6 +35953,7 @@ pub unsafe fn _mm_mask3_fmadd_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fmadd_sd&expand=2578) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vfmadd213sd))] pub unsafe fn _mm_mask_fmadd_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { let mut fmadd: f64 = simd_extract(a, 0); @@ -33621,6 +35970,7 @@ pub unsafe fn _mm_mask_fmadd_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fmadd_sd&expand=2580) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd213sd))] pub unsafe fn _mm_maskz_fmadd_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { let mut fmadd: f64 = 0.; @@ -33638,6 +35988,7 @@ pub unsafe fn _mm_maskz_fmadd_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fmadd_sd&expand=2579) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd213sd))] pub unsafe fn _mm_mask3_fmadd_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { let mut fmadd: f64 = simd_extract(c, 0); @@ -33654,6 +36005,7 @@ pub unsafe fn _mm_mask3_fmadd_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fmsub_ss&expand=2668) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub213ss))] pub unsafe fn _mm_mask_fmsub_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { let mut fmsub: f32 = simd_extract(a, 0); @@ -33671,6 +36023,7 @@ pub unsafe fn _mm_mask_fmsub_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fmsub_ss&expand=2670) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub213ss))] pub unsafe fn _mm_maskz_fmsub_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { let mut fmsub: f32 = 0.; @@ -33689,6 +36042,7 @@ pub unsafe fn _mm_maskz_fmsub_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fmsub_ss&expand=2669) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub213ss))] pub unsafe fn _mm_mask3_fmsub_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { let mut fmsub: f32 = simd_extract(c, 0); @@ -33706,6 +36060,7 @@ pub unsafe fn _mm_mask3_fmsub_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fmsub_sd&expand=2664) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub213sd))] pub unsafe fn _mm_mask_fmsub_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { let mut fmsub: f64 = simd_extract(a, 0); @@ -33723,6 +36078,7 @@ pub unsafe fn _mm_mask_fmsub_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fmsub_sd&expand=2666) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub213sd))] pub unsafe fn _mm_maskz_fmsub_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { let mut fmsub: f64 = 0.; @@ -33741,6 +36097,7 @@ pub unsafe fn _mm_maskz_fmsub_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fmsub_sd&expand=2665) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub213sd))] pub unsafe fn _mm_mask3_fmsub_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { let mut fmsub: f64 = simd_extract(c, 0); @@ -33758,6 +36115,7 @@ pub unsafe fn _mm_mask3_fmsub_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fnmadd_ss&expand=2748) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd213ss))] pub unsafe fn _mm_mask_fnmadd_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { let mut fnmadd: f32 = simd_extract(a, 0); @@ -33775,6 +36133,7 @@ pub unsafe fn _mm_mask_fnmadd_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fnmadd_ss&expand=2750) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd213ss))] pub unsafe fn _mm_maskz_fnmadd_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { let mut fnmadd: f32 = 0.; @@ -33793,6 +36152,7 @@ pub unsafe fn _mm_maskz_fnmadd_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fnmadd_ss&expand=2749) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd213ss))] pub unsafe fn _mm_mask3_fnmadd_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { let mut fnmadd: f32 = simd_extract(c, 0); 
@@ -33810,6 +36170,7 @@ pub unsafe fn _mm_mask3_fnmadd_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fnmadd_sd&expand=2744) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd213sd))] pub unsafe fn _mm_mask_fnmadd_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { let mut fnmadd: f64 = simd_extract(a, 0); @@ -33827,6 +36188,7 @@ pub unsafe fn _mm_mask_fnmadd_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fnmadd_sd&expand=2746) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd213sd))] pub unsafe fn _mm_maskz_fnmadd_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { let mut fnmadd: f64 = 0.; @@ -33845,6 +36207,7 @@ pub unsafe fn _mm_maskz_fnmadd_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fnmadd_sd&expand=2745) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd213sd))] pub unsafe fn _mm_mask3_fnmadd_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { let mut fnmadd: f64 = simd_extract(c, 0); @@ -33862,6 +36225,7 @@ pub unsafe fn _mm_mask3_fnmadd_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fnmsub_ss&expand=2796) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub213ss))] pub 
unsafe fn _mm_mask_fnmsub_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -> __m128 { let mut fnmsub: f32 = simd_extract(a, 0); @@ -33880,6 +36244,7 @@ pub unsafe fn _mm_mask_fnmsub_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fnmsub_ss&expand=2798) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub213ss))] pub unsafe fn _mm_maskz_fnmsub_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -> __m128 { let mut fnmsub: f32 = 0.; @@ -33899,6 +36264,7 @@ pub unsafe fn _mm_maskz_fnmsub_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fnmsub_ss&expand=2797) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub213ss))] pub unsafe fn _mm_mask3_fnmsub_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128 { let mut fnmsub: f32 = simd_extract(c, 0); @@ -33917,6 +36283,7 @@ pub unsafe fn _mm_mask3_fnmsub_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fnmsub_sd&expand=2792) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub213sd))] pub unsafe fn _mm_mask_fnmsub_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d) -> __m128d { let mut fnmsub: f64 = simd_extract(a, 0); @@ -33935,6 +36302,7 @@ pub unsafe fn _mm_mask_fnmsub_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fnmsub_sd&expand=2794) #[inline] #[target_feature(enable = 
"avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub213sd))] pub unsafe fn _mm_maskz_fnmsub_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d) -> __m128d { let mut fnmsub: f64 = 0.; @@ -33954,6 +36322,7 @@ pub unsafe fn _mm_maskz_fnmsub_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fnmsub_sd&expand=2793) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub213sd))] pub unsafe fn _mm_mask3_fnmsub_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8) -> __m128d { let mut fnmsub: f64 = simd_extract(c, 0); @@ -33979,6 +36348,7 @@ pub unsafe fn _mm_mask3_fnmsub_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_round_ss&expand=151) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddss, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_add_round_ss(a: __m128, b: __m128) -> __m128 { @@ -34002,6 +36372,7 @@ pub unsafe fn _mm_add_round_ss(a: __m128, b: __m128) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_add_round_ss&expand=152) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_add_round_ss( @@ -34030,6 +36401,7 @@ pub unsafe fn _mm_mask_add_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_round_ss&expand=153) #[inline] #[target_feature(enable = "avx512f")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddss, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_add_round_ss( @@ -34057,6 +36429,7 @@ pub unsafe fn _mm_maskz_add_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_round_sd&expand=148) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddsd, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_add_round_sd(a: __m128d, b: __m128d) -> __m128d { @@ -34080,6 +36453,7 @@ pub unsafe fn _mm_add_round_sd(a: __m128d, b: __m128d) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_add_round_Sd&expand=149) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddsd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_add_round_sd( @@ -34108,6 +36482,7 @@ pub unsafe fn _mm_mask_add_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_round_sd&expand=150) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaddsd, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_add_round_sd( @@ -34135,6 +36510,7 @@ pub unsafe fn _mm_maskz_add_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_sub_round_ss&expand=5745) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubss, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_sub_round_ss(a: __m128, b: __m128) 
-> __m128 { @@ -34158,6 +36534,7 @@ pub unsafe fn _mm_sub_round_ss(a: __m128, b: __m128) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_sub_round_ss&expand=5743) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_sub_round_ss( @@ -34186,6 +36563,7 @@ pub unsafe fn _mm_mask_sub_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_sub_round_ss&expand=5744) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubss, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_sub_round_ss( @@ -34213,6 +36591,7 @@ pub unsafe fn _mm_maskz_sub_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_sub_round_sd&expand=5742) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubsd, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_sub_round_sd(a: __m128d, b: __m128d) -> __m128d { @@ -34236,6 +36615,7 @@ pub unsafe fn _mm_sub_round_sd(a: __m128d, b: __m128d) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_sub_round_sd&expand=5740) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubsd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_sub_round_sd( @@ -34264,6 +36644,7 @@ pub unsafe fn _mm_mask_sub_round_sd( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_sub_round_sd&expand=5741) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsubsd, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_sub_round_sd( @@ -34291,6 +36672,7 @@ pub unsafe fn _mm_maskz_sub_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mul_round_ss&expand=3946) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulss, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_mul_round_ss(a: __m128, b: __m128) -> __m128 { @@ -34314,6 +36696,7 @@ pub unsafe fn _mm_mul_round_ss(a: __m128, b: __m128) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_mul_round_ss&expand=3944) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_mul_round_ss( @@ -34342,6 +36725,7 @@ pub unsafe fn _mm_mask_mul_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_mul_round_ss&expand=3945) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulss, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_mul_round_ss( @@ -34369,6 +36753,7 @@ pub unsafe fn _mm_maskz_mul_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mul_round_sd&expand=3943) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulsd, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_mul_round_sd(a: __m128d, b: __m128d) -> __m128d { @@ -34392,6 +36777,7 @@ pub unsafe fn _mm_mul_round_sd(a: __m128d, b: __m128d) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_mul_round_sd&expand=3941) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulsd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_mul_round_sd( @@ -34420,6 +36806,7 @@ pub unsafe fn _mm_mask_mul_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_mul_round_sd&expand=3942) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmulsd, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_mul_round_sd( @@ -34447,6 +36834,7 @@ pub unsafe fn _mm_maskz_mul_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_div_round_ss&expand=2174) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivss, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_div_round_ss(a: __m128, b: __m128) -> __m128 { @@ -34470,6 +36858,7 @@ pub unsafe fn _mm_div_round_ss(a: __m128, b: __m128) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_div_round_ss&expand=2175) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn 
_mm_mask_div_round_ss( @@ -34498,6 +36887,7 @@ pub unsafe fn _mm_mask_div_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_div_round_ss&expand=2176) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivss, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_div_round_ss( @@ -34525,6 +36915,7 @@ pub unsafe fn _mm_maskz_div_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_div_round_sd&expand=2171) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivsd, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_div_round_sd(a: __m128d, b: __m128d) -> __m128d { @@ -34548,6 +36939,7 @@ pub unsafe fn _mm_div_round_sd(a: __m128d, b: __m128d) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_div_round_sd&expand=2172) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivsd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_div_round_sd( @@ -34576,6 +36968,7 @@ pub unsafe fn _mm_mask_div_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_div_round_sd&expand=2173) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vdivsd, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_div_round_sd( @@ -34597,6 +36990,7 @@ pub unsafe fn _mm_maskz_div_round_sd( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_max_round_ss&expand=3668) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxss, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_max_round_ss(a: __m128, b: __m128) -> __m128 { @@ -34614,6 +37008,7 @@ pub unsafe fn _mm_max_round_ss(a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_max_ss&expand=3672) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxss, SAE = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_max_round_ss( @@ -34636,6 +37031,7 @@ pub unsafe fn _mm_mask_max_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_max_round_ss&expand=3667) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxss, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_max_round_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { @@ -34653,6 +37049,7 @@ pub unsafe fn _mm_maskz_max_round_ss(k: __mmask8, a: __m128, b: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_max_round_sd&expand=3665) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxsd, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_max_round_sd(a: __m128d, b: __m128d) -> __m128d { @@ -34670,6 +37067,7 @@ pub unsafe fn _mm_max_round_sd(a: __m128d, b: __m128d) -> __m128 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_max_round_sd&expand=3663) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxsd, SAE = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_max_round_sd( @@ -34692,6 +37090,7 @@ pub unsafe fn _mm_mask_max_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_max_sd&expand=3670) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vmaxsd, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_max_round_sd( @@ -34713,6 +37112,7 @@ pub unsafe fn _mm_maskz_max_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_min_round_ss&expand=3782) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminss, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_min_round_ss(a: __m128, b: __m128) -> __m128 { @@ -34730,6 +37130,7 @@ pub unsafe fn _mm_min_round_ss(a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_min_round_Ss&expand=3780) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminss, SAE = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_min_round_ss( @@ -34752,6 +37153,7 @@ pub unsafe fn _mm_mask_min_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_min_round_ss&expand=3781) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", 
issue = "111137")] #[cfg_attr(test, assert_instr(vminss, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_min_round_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 { @@ -34769,6 +37171,7 @@ pub unsafe fn _mm_maskz_min_round_ss(k: __mmask8, a: __m128, b: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_min_round_sd&expand=3779) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminsd, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_min_round_sd(a: __m128d, b: __m128d) -> __m128d { @@ -34786,6 +37189,7 @@ pub unsafe fn _mm_min_round_sd(a: __m128d, b: __m128d) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_min_round_sd&expand=3777) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminsd, SAE = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_min_round_sd( @@ -34808,6 +37212,7 @@ pub unsafe fn _mm_mask_min_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_min_round_Sd&expand=3778) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vminsd, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_min_round_sd( @@ -34835,6 +37240,7 @@ pub unsafe fn _mm_maskz_min_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_sqrt_round_ss&expand=5383) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtss, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_sqrt_round_ss(a: 
__m128, b: __m128) -> __m128 { @@ -34858,6 +37264,7 @@ pub unsafe fn _mm_sqrt_round_ss(a: __m128, b: __m128) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_sqrt_round_ss&expand=5381) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_sqrt_round_ss( @@ -34886,6 +37293,7 @@ pub unsafe fn _mm_mask_sqrt_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_sqrt_round_ss&expand=5382) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtss, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_sqrt_round_ss( @@ -34913,6 +37321,7 @@ pub unsafe fn _mm_maskz_sqrt_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_sqrt_round_sd&expand=5380) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtsd, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_sqrt_round_sd(a: __m128d, b: __m128d) -> __m128d { @@ -34936,6 +37345,7 @@ pub unsafe fn _mm_sqrt_round_sd(a: __m128d, b: __m128d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_sqrt_round_sd&expand=5378) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtsd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_sqrt_round_sd( @@ -34964,6 +37374,7 @@ pub unsafe fn _mm_mask_sqrt_round_sd( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_sqrt_round_sd&expand=5379) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vsqrtsd, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_sqrt_round_sd( @@ -34985,6 +37396,7 @@ pub unsafe fn _mm_maskz_sqrt_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_getexp_round_ss&expand=2856) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpss, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_getexp_round_ss(a: __m128, b: __m128) -> __m128 { @@ -35002,6 +37414,7 @@ pub unsafe fn _mm_getexp_round_ss(a: __m128, b: __m128) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_getexp_round_ss&expand=2857) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpss, SAE = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_getexp_round_ss( @@ -35024,6 +37437,7 @@ pub unsafe fn _mm_mask_getexp_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_getexp_round_ss&expand=2858) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpss, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_getexp_round_ss( @@ -35045,6 +37459,7 @@ pub unsafe fn _mm_maskz_getexp_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_getexp_round_sd&expand=2853) #[inline] #[target_feature(enable = "avx512f")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpsd, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_getexp_round_sd(a: __m128d, b: __m128d) -> __m128d { @@ -35062,6 +37477,7 @@ pub unsafe fn _mm_getexp_round_sd(a: __m128d, b: __m128d) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_getexp_round_sd&expand=2854) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpsd, SAE = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_getexp_round_sd( @@ -35084,6 +37500,7 @@ pub unsafe fn _mm_mask_getexp_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_getexp_round_sd&expand=2855) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetexpsd, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_getexp_round_sd( @@ -35114,6 +37531,7 @@ pub unsafe fn _mm_maskz_getexp_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_getmant_round_ss&expand=2892) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantss, NORM = 0, SIGN = 0, SAE = 4))] #[rustc_legacy_const_generics(2, 3, 4)] pub unsafe fn _mm_getmant_round_ss< @@ -35149,6 +37567,7 @@ pub unsafe fn _mm_getmant_round_ss< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_getmant_round_ss&expand=2893) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantss, NORM = 0, SIGN = 0, SAE = 4))] 
#[rustc_legacy_const_generics(4, 5, 6)] pub unsafe fn _mm_mask_getmant_round_ss< @@ -35186,6 +37605,7 @@ pub unsafe fn _mm_mask_getmant_round_ss< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_getmant_round_ss&expand=2894) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantss, NORM = 0, SIGN = 0, SAE = 4))] #[rustc_legacy_const_generics(3, 4, 5)] pub unsafe fn _mm_maskz_getmant_round_ss< @@ -35222,6 +37642,7 @@ pub unsafe fn _mm_maskz_getmant_round_ss< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_getmant_round_sd&expand=2889) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantsd, NORM = 0, SIGN = 0, SAE = 4))] #[rustc_legacy_const_generics(2, 3, 4)] pub unsafe fn _mm_getmant_round_sd< @@ -35257,6 +37678,7 @@ pub unsafe fn _mm_getmant_round_sd< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_getmant_round_sd&expand=2890) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantsd, NORM = 0, SIGN = 0, SAE = 4))] #[rustc_legacy_const_generics(4, 5, 6)] pub unsafe fn _mm_mask_getmant_round_sd< @@ -35294,6 +37716,7 @@ pub unsafe fn _mm_mask_getmant_round_sd< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_getmant_round_sd&expand=2891) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgetmantsd, NORM = 0, SIGN = 0, SAE = 4))] #[rustc_legacy_const_generics(3, 4, 5)] pub unsafe fn _mm_maskz_getmant_round_sd< @@ -35327,6 +37750,7 @@ pub 
unsafe fn _mm_maskz_getmant_round_sd< /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_roundscale_round_ss&expand=4796) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm_roundscale_round_ss( @@ -35354,6 +37778,7 @@ pub unsafe fn _mm_roundscale_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_roundscale_round_ss&expand=4794) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(4, 5)] pub unsafe fn _mm_mask_roundscale_round_ss( @@ -35383,6 +37808,7 @@ pub unsafe fn _mm_mask_roundscale_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_roundscale_round_ss&expand=4795) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm_maskz_roundscale_round_ss( @@ -35411,6 +37837,7 @@ pub unsafe fn _mm_maskz_roundscale_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_roundscale_round_sd&expand=4793) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm_roundscale_round_sd( @@ -35438,6 +37865,7 @@ pub unsafe fn _mm_roundscale_round_sd( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_roundscale_round_sd&expand=4791) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(4, 5)] pub unsafe fn _mm_mask_roundscale_round_sd( @@ -35467,6 +37895,7 @@ pub unsafe fn _mm_mask_roundscale_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_roundscale_round_sd&expand=4792) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm_maskz_roundscale_round_sd( @@ -35495,6 +37924,7 @@ pub unsafe fn _mm_maskz_roundscale_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_scalef_round_ss&expand=4895) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefss, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_scalef_round_ss(a: __m128, b: __m128) -> __m128 { @@ -35518,6 +37948,7 @@ pub unsafe fn _mm_scalef_round_ss(a: __m128, b: __m128) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_scalef_round_ss&expand=4893) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_scalef_round_ss( @@ -35546,6 +37977,7 @@ pub unsafe fn _mm_mask_scalef_round_ss( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_scalef_round_ss&expand=4894) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefss, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_scalef_round_ss( @@ -35573,6 +38005,7 @@ pub unsafe fn _mm_maskz_scalef_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_scalef_round_sd&expand=4892) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefsd, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_scalef_round_sd(a: __m128d, b: __m128d) -> __m128d { @@ -35596,6 +38029,7 @@ pub unsafe fn _mm_scalef_round_sd(a: __m128d, b: __m128d) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_scalef_round_sd&expand=4890) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefsd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_scalef_round_sd( @@ -35623,6 +38057,7 @@ pub unsafe fn _mm_mask_scalef_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_scalef_round_sd&expand=4891) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vscalefsd, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_scalef_round_sd( @@ -35650,6 +38085,7 @@ pub unsafe fn _mm_maskz_scalef_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fmadd_round_ss&expand=2573) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd213ss, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_fmadd_round_ss(a: __m128, b: __m128, c: __m128) -> __m128 { @@ -35673,6 +38109,7 @@ pub unsafe fn _mm_fmadd_round_ss(a: __m128, b: __m128, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fmadd_round_ss&expand=2574) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd213ss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_fmadd_round_ss( @@ -35703,6 +38140,7 @@ pub unsafe fn _mm_mask_fmadd_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fmadd_round_ss&expand=2576) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd213ss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_maskz_fmadd_round_ss( @@ -35734,6 +38172,7 @@ pub unsafe fn _mm_maskz_fmadd_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fmadd_round_ss&expand=2575) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd213ss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask3_fmadd_round_ss( @@ -35764,6 +38203,7 @@ pub unsafe fn _mm_mask3_fmadd_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fmadd_round_sd&expand=2569) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd213sd, 
ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_fmadd_round_sd( @@ -35791,6 +38231,7 @@ pub unsafe fn _mm_fmadd_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fmadd_round_sd&expand=2570) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd213sd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_fmadd_round_sd( @@ -35821,6 +38262,7 @@ pub unsafe fn _mm_mask_fmadd_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fmadd_round_sd&expand=2572) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd213sd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_maskz_fmadd_round_sd( @@ -35852,6 +38294,7 @@ pub unsafe fn _mm_maskz_fmadd_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fmadd_round_Sd&expand=2571) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmadd213sd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask3_fmadd_round_sd( @@ -35882,6 +38325,7 @@ pub unsafe fn _mm_mask3_fmadd_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fmsub_round_ss&expand=2659) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub213ss, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_fmsub_round_ss(a: __m128, b: __m128, c: __m128) -> __m128 { @@ -35906,6 +38350,7 @@ pub unsafe fn _mm_fmsub_round_ss(a: __m128, b: __m128, c: _ /// 
[Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fmsub_round_ss&expand=2660) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub213ss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_fmsub_round_ss( @@ -35937,6 +38382,7 @@ pub unsafe fn _mm_mask_fmsub_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fmsub_round_ss&expand=2662) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub213ss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_maskz_fmsub_round_ss( @@ -35969,6 +38415,7 @@ pub unsafe fn _mm_maskz_fmsub_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fmsub_round_ss&expand=2661) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub213ss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask3_fmsub_round_ss( @@ -36000,6 +38447,7 @@ pub unsafe fn _mm_mask3_fmsub_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fmsub_round_sd&expand=2655) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub213sd, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_fmsub_round_sd( @@ -36028,6 +38476,7 @@ pub unsafe fn _mm_fmsub_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fmsub_round_sd&expand=2656) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub213sd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_fmsub_round_sd( @@ -36059,6 +38508,7 @@ pub unsafe fn _mm_mask_fmsub_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fmsub_round_sd&expand=2658) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub213sd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_maskz_fmsub_round_sd( @@ -36091,6 +38541,7 @@ pub unsafe fn _mm_maskz_fmsub_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fmsub_round_sd&expand=2657) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfmsub213sd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask3_fmsub_round_sd( @@ -36122,6 +38573,7 @@ pub unsafe fn _mm_mask3_fmsub_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fnmadd_round_ss&expand=2739) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd213ss, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_fnmadd_round_ss(a: __m128, b: __m128, c: __m128) -> __m128 { @@ -36146,6 +38598,7 @@ pub unsafe fn _mm_fnmadd_round_ss(a: __m128, b: __m128, c: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fnmadd_round_ss&expand=2740) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd213ss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub 
unsafe fn _mm_mask_fnmadd_round_ss( @@ -36177,6 +38630,7 @@ pub unsafe fn _mm_mask_fnmadd_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fnmadd_round_ss&expand=2742) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd213ss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_maskz_fnmadd_round_ss( @@ -36209,6 +38663,7 @@ pub unsafe fn _mm_maskz_fnmadd_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fnmadd_round_ss&expand=2741) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd213ss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask3_fnmadd_round_ss( @@ -36240,6 +38695,7 @@ pub unsafe fn _mm_mask3_fnmadd_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fnmadd_round_sd&expand=2735) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd213sd, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_fnmadd_round_sd( @@ -36268,6 +38724,7 @@ pub unsafe fn _mm_fnmadd_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fnmadd_round_sd&expand=2736) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd213sd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_fnmadd_round_sd( @@ -36299,6 +38756,7 @@ pub unsafe fn _mm_mask_fnmadd_round_sd( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fnmadd_round_sd&expand=2738) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd213sd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_maskz_fnmadd_round_sd( @@ -36331,6 +38789,7 @@ pub unsafe fn _mm_maskz_fnmadd_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fnmadd_round_Sd&expand=2737) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmadd213sd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask3_fnmadd_round_sd( @@ -36362,6 +38821,7 @@ pub unsafe fn _mm_mask3_fnmadd_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fnmsub_round_ss&expand=2787) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub213ss, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_fnmsub_round_ss(a: __m128, b: __m128, c: __m128) -> __m128 { @@ -36387,6 +38847,7 @@ pub unsafe fn _mm_fnmsub_round_ss(a: __m128, b: __m128, c: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fnmsub_round_ss&expand=2788) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub213ss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_fnmsub_round_ss( @@ -36419,6 +38880,7 @@ pub unsafe fn _mm_mask_fnmsub_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fnmsub_round_ss&expand=2790) 
#[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub213ss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_maskz_fnmsub_round_ss( @@ -36452,6 +38914,7 @@ pub unsafe fn _mm_maskz_fnmsub_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fnmsub_round_ss&expand=2789) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub213ss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask3_fnmsub_round_ss( @@ -36484,6 +38947,7 @@ pub unsafe fn _mm_mask3_fnmsub_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fnmsub_round_sd&expand=2783) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub213sd, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_fnmsub_round_sd( @@ -36513,6 +38977,7 @@ pub unsafe fn _mm_fnmsub_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fnmsub_round_sd&expand=2784) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub213sd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_fnmsub_round_sd( @@ -36545,6 +39010,7 @@ pub unsafe fn _mm_mask_fnmsub_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fnmsub_round_sd&expand=2786) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub213sd, ROUNDING = 8))] 
#[rustc_legacy_const_generics(4)] pub unsafe fn _mm_maskz_fnmsub_round_sd( @@ -36578,6 +39044,7 @@ pub unsafe fn _mm_maskz_fnmsub_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fnmsub_round_sd&expand=2785) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfnmsub213sd, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask3_fnmsub_round_sd( @@ -36603,6 +39070,7 @@ pub unsafe fn _mm_mask3_fnmsub_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fixupimm_ss&expand=2517) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_fixupimm_ss(a: __m128, b: __m128, c: __m128i) -> __m128 { @@ -36621,6 +39089,7 @@ pub unsafe fn _mm_fixupimm_ss(a: __m128, b: __m128, c: __m128i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fixupimm_ss&expand=2518) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_fixupimm_ss( @@ -36644,6 +39113,7 @@ pub unsafe fn _mm_mask_fixupimm_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fixupimm_ss&expand=2519) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_maskz_fixupimm_ss( @@ -36667,6 +39137,7 @@ pub unsafe fn _mm_maskz_fixupimm_ss( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fixupimm_sd&expand=2514) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_fixupimm_sd(a: __m128d, b: __m128d, c: __m128i) -> __m128d { @@ -36685,6 +39156,7 @@ pub unsafe fn _mm_fixupimm_sd(a: __m128d, b: __m128d, c: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fixupimm_sd&expand=2515) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_fixupimm_sd( @@ -36708,6 +39180,7 @@ pub unsafe fn _mm_mask_fixupimm_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fixupimm_sd&expand=2516) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_maskz_fixupimm_sd( @@ -36732,6 +39205,7 @@ pub unsafe fn _mm_maskz_fixupimm_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fixupimm_round_ss&expand=2511) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm_fixupimm_round_ss( @@ -36756,6 +39230,7 @@ pub unsafe fn _mm_fixupimm_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fixupimm_round_ss&expand=2512) #[inline] #[target_feature(enable = 
"avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(4, 5)] pub unsafe fn _mm_mask_fixupimm_round_ss( @@ -36781,6 +39256,7 @@ pub unsafe fn _mm_mask_fixupimm_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fixupimm_round_ss&expand=2513) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(4, 5)] pub unsafe fn _mm_maskz_fixupimm_round_ss( @@ -36806,6 +39282,7 @@ pub unsafe fn _mm_maskz_fixupimm_round_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fixupimm_round_sd&expand=2508) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(3, 4)] pub unsafe fn _mm_fixupimm_round_sd( @@ -36830,6 +39307,7 @@ pub unsafe fn _mm_fixupimm_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fixupimm_round_sd&expand=2509) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0, SAE = 8))] #[rustc_legacy_const_generics(4, 5)] pub unsafe fn _mm_mask_fixupimm_round_sd( @@ -36855,6 +39333,7 @@ pub unsafe fn _mm_mask_fixupimm_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fixupimm_round_sd&expand=2510) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0, SAE = 8))] 
#[rustc_legacy_const_generics(4, 5)] pub unsafe fn _mm_maskz_fixupimm_round_sd( @@ -36879,6 +39358,7 @@ pub unsafe fn _mm_maskz_fixupimm_round_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_cvtss_sd&expand=1896) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2sd))] pub unsafe fn _mm_mask_cvtss_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128) -> __m128d { transmute(vcvtss2sd( @@ -36895,6 +39375,7 @@ pub unsafe fn _mm_mask_cvtss_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_cvtss_sd&expand=1897) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2sd))] pub unsafe fn _mm_maskz_cvtss_sd(k: __mmask8, a: __m128d, b: __m128) -> __m128d { transmute(vcvtss2sd( @@ -36911,6 +39392,7 @@ pub unsafe fn _mm_maskz_cvtss_sd(k: __mmask8, a: __m128d, b: __m128) -> __m128d /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_cvtsd_ss&expand=1797) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2ss))] pub unsafe fn _mm_mask_cvtsd_ss(src: __m128, k: __mmask8, a: __m128, b: __m128d) -> __m128 { transmute(vcvtsd2ss( @@ -36927,6 +39409,7 @@ pub unsafe fn _mm_mask_cvtsd_ss(src: __m128, k: __mmask8, a: __m128, b: __m128d) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_cvtsd_ss&expand=1798) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2ss))] pub unsafe fn _mm_maskz_cvtsd_ss(k: 
__mmask8, a: __m128, b: __m128d) -> __m128 { transmute(vcvtsd2ss( @@ -36944,6 +39427,7 @@ pub unsafe fn _mm_maskz_cvtsd_ss(k: __mmask8, a: __m128, b: __m128d) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundss_sd&expand=1371) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2sd, SAE = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_cvt_roundss_sd(a: __m128d, b: __m128) -> __m128d { @@ -36961,6 +39445,7 @@ pub unsafe fn _mm_cvt_roundss_sd(a: __m128d, b: __m128) -> __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_cvt_roundss_sd&expand=1372) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2sd, SAE = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_cvt_roundss_sd( @@ -36983,6 +39468,7 @@ pub unsafe fn _mm_mask_cvt_roundss_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_cvt_roundss_sd&expand=1373) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2sd, SAE = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_cvt_roundss_sd( @@ -37009,6 +39495,7 @@ pub unsafe fn _mm_maskz_cvt_roundss_sd( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundsd_ss&expand=1361) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2ss, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_cvt_roundsd_ss(a: __m128, b: __m128d) -> __m128 { @@ -37031,6 +39518,7 @@ pub unsafe fn 
_mm_cvt_roundsd_ss(a: __m128, b: __m128d) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_cvt_roundsd_ss&expand=1362) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2ss, ROUNDING = 8))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_cvt_roundsd_ss( @@ -37058,6 +39546,7 @@ pub unsafe fn _mm_mask_cvt_roundsd_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_cvt_roundsd_ss&expand=1363) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2ss, ROUNDING = 8))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_cvt_roundsd_ss( @@ -37084,6 +39573,7 @@ pub unsafe fn _mm_maskz_cvt_roundsd_ss( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundss_si32&expand=1374) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2si, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvt_roundss_si32(a: __m128) -> i32 { @@ -37103,6 +39593,7 @@ pub unsafe fn _mm_cvt_roundss_si32(a: __m128) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundss_i32&expand=1369) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2si, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvt_roundss_i32(a: __m128) -> i32 { @@ -37122,6 +39613,7 @@ pub unsafe fn _mm_cvt_roundss_i32(a: __m128) -> i32 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundss_u32&expand=1376) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2usi, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvt_roundss_u32(a: __m128) -> u32 { @@ -37135,6 +39627,7 @@ pub unsafe fn _mm_cvt_roundss_u32(a: __m128) -> u32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvtss_i32&expand=1893) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2si))] pub unsafe fn _mm_cvtss_i32(a: __m128) -> i32 { vcvtss2si(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION) @@ -37145,6 +39638,7 @@ pub unsafe fn _mm_cvtss_i32(a: __m128) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvtss_u32&expand=1901) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2usi))] pub unsafe fn _mm_cvtss_u32(a: __m128) -> u32 { vcvtss2usi(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION) @@ -37161,6 +39655,7 @@ pub unsafe fn _mm_cvtss_u32(a: __m128) -> u32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundsd_si32&expand=1359) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2si, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvt_roundsd_si32(a: __m128d) -> i32 { @@ -37180,6 +39675,7 @@ pub unsafe fn _mm_cvt_roundsd_si32(a: __m128d) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundsd_i32&expand=1357) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2si, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvt_roundsd_i32(a: __m128d) -> i32 { @@ -37199,6 +39695,7 @@ pub unsafe fn _mm_cvt_roundsd_i32(a: __m128d) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=cvt_roundsd_u32&expand=1364) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2usi, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvt_roundsd_u32(a: __m128d) -> u32 { @@ -37212,6 +39709,7 @@ pub unsafe fn _mm_cvt_roundsd_u32(a: __m128d) -> u32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvtsd_i32&expand=1791) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2si))] pub unsafe fn _mm_cvtsd_i32(a: __m128d) -> i32 { vcvtsd2si(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION) @@ -37222,6 +39720,7 @@ pub unsafe fn _mm_cvtsd_i32(a: __m128d) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvtsd_u32&expand=1799) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2usi))] pub unsafe fn _mm_cvtsd_u32(a: __m128d) -> u32 { vcvtsd2usi(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION) @@ -37239,6 +39738,7 @@ pub unsafe fn _mm_cvtsd_u32(a: __m128d) -> u32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundi32_ss&expand=1312) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vcvtsi2ss, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_cvt_roundi32_ss(a: __m128, b: i32) -> __m128 { @@ -37260,6 +39760,7 @@ pub unsafe fn _mm_cvt_roundi32_ss(a: __m128, b: i32) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundsi32_ss&expand=1366) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsi2ss, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_cvt_roundsi32_ss(a: __m128, b: i32) -> __m128 { @@ -37280,6 +39781,7 @@ pub unsafe fn _mm_cvt_roundsi32_ss(a: __m128, b: i32) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundu32_ss&expand=1378) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtusi2ss, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_cvt_roundu32_ss(a: __m128, b: u32) -> __m128 { @@ -37294,6 +39796,7 @@ pub unsafe fn _mm_cvt_roundu32_ss(a: __m128, b: u32) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvti32_ss&expand=1643) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsi2ss))] pub unsafe fn _mm_cvti32_ss(a: __m128, b: i32) -> __m128 { let b = b as f32; @@ -37305,6 +39808,7 @@ pub unsafe fn _mm_cvti32_ss(a: __m128, b: i32) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvti32_sd&expand=1642) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsi2sd))] pub unsafe fn _mm_cvti32_sd(a: __m128d, b: i32) -> __m128d { 
let b = b as f64; @@ -37317,6 +39821,7 @@ pub unsafe fn _mm_cvti32_sd(a: __m128d, b: i32) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvtt_roundss_Si32&expand=1936) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2si, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvtt_roundss_si32(a: __m128) -> i32 { @@ -37331,6 +39836,7 @@ pub unsafe fn _mm_cvtt_roundss_si32(a: __m128) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvtt_roundss_i32&expand=1934) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2si, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvtt_roundss_i32(a: __m128) -> i32 { @@ -37345,6 +39851,7 @@ pub unsafe fn _mm_cvtt_roundss_i32(a: __m128) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvtt_roundss_u32&expand=1938) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2usi, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvtt_roundss_u32(a: __m128) -> u32 { @@ -37358,6 +39865,7 @@ pub unsafe fn _mm_cvtt_roundss_u32(a: __m128) -> u32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_i32&expand=2022) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2si))] pub unsafe fn _mm_cvttss_i32(a: __m128) -> i32 { vcvtss2si(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION) @@ -37368,6 +39876,7 @@ pub unsafe fn _mm_cvttss_i32(a: __m128) -> i32 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_u32&expand=2026) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2usi))] pub unsafe fn _mm_cvttss_u32(a: __m128) -> u32 { vcvtss2usi(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION) @@ -37379,6 +39888,7 @@ pub unsafe fn _mm_cvttss_u32(a: __m128) -> u32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_roundsd_si32&expand=1930) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2si, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvtt_roundsd_si32(a: __m128d) -> i32 { @@ -37393,6 +39903,7 @@ pub unsafe fn _mm_cvtt_roundsd_si32(a: __m128d) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_roundsd_i32&expand=1928) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2si, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvtt_roundsd_i32(a: __m128d) -> i32 { @@ -37407,6 +39918,7 @@ pub unsafe fn _mm_cvtt_roundsd_i32(a: __m128d) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvtt_roundsd_u32&expand=1932) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2usi, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvtt_roundsd_u32(a: __m128d) -> u32 { @@ -37420,6 +39932,7 @@ pub unsafe fn _mm_cvtt_roundsd_u32(a: __m128d) -> u32 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_i32&expand=2015) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2si))] pub unsafe fn _mm_cvttsd_i32(a: __m128d) -> i32 { vcvtsd2si(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION) @@ -37430,6 +39943,7 @@ pub unsafe fn _mm_cvttsd_i32(a: __m128d) -> i32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_u32&expand=2020) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2usi))] pub unsafe fn _mm_cvttsd_u32(a: __m128d) -> u32 { vcvtsd2usi(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION) @@ -37440,6 +39954,7 @@ pub unsafe fn _mm_cvttsd_u32(a: __m128d) -> u32 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtu32_ss&expand=2032) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtusi2ss))] pub unsafe fn _mm_cvtu32_ss(a: __m128, b: u32) -> __m128 { let b = b as f32; @@ -37451,6 +39966,7 @@ pub unsafe fn _mm_cvtu32_ss(a: __m128, b: u32) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtu32_sd&expand=2031) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtusi2sd))] pub unsafe fn _mm_cvtu32_sd(a: __m128d, b: u32) -> __m128d { let b = b as f64; @@ -37463,6 +39979,7 @@ pub unsafe fn _mm_cvtu32_sd(a: __m128d, b: u32) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comi_round_ss&expand=1175) #[inline] #[target_feature(enable = "avx512f")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp, IMM5 = 5, SAE = 4))] //should be vcomiss #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm_comi_round_ss(a: __m128, b: __m128) -> i32 { @@ -37479,6 +39996,7 @@ pub unsafe fn _mm_comi_round_ss(a: __m128, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comi_round_sd&expand=1174) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcmp, IMM5 = 5, SAE = 4))] //should be vcomisd #[rustc_legacy_const_generics(2, 3)] pub unsafe fn _mm_comi_round_sd(a: __m128d, b: __m128d) -> i32 { @@ -37490,293 +40008,564 @@ pub unsafe fn _mm_comi_round_sd(a: __m128d, b: } /// Equal +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_CMPINT_EQ: _MM_CMPINT_ENUM = 0x00; /// Less-than +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_CMPINT_LT: _MM_CMPINT_ENUM = 0x01; /// Less-than-or-equal +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_CMPINT_LE: _MM_CMPINT_ENUM = 0x02; /// False +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_CMPINT_FALSE: _MM_CMPINT_ENUM = 0x03; /// Not-equal +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_CMPINT_NE: _MM_CMPINT_ENUM = 0x04; /// Not less-than +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_CMPINT_NLT: _MM_CMPINT_ENUM = 0x05; /// Not less-than-or-equal +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_CMPINT_NLE: _MM_CMPINT_ENUM = 0x06; /// True +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_CMPINT_TRUE: _MM_CMPINT_ENUM = 0x07; /// interval [1, 2) +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_MANT_NORM_1_2: _MM_MANTISSA_NORM_ENUM = 0x00; /// interval 
[0.5, 2) +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_MANT_NORM_P5_2: _MM_MANTISSA_NORM_ENUM = 0x01; /// interval [0.5, 1) +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_MANT_NORM_P5_1: _MM_MANTISSA_NORM_ENUM = 0x02; /// interval [0.75, 1.5) +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_MANT_NORM_P75_1P5: _MM_MANTISSA_NORM_ENUM = 0x03; /// sign = sign(SRC) +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_MANT_SIGN_SRC: _MM_MANTISSA_SIGN_ENUM = 0x00; /// sign = 0 +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_MANT_SIGN_ZERO: _MM_MANTISSA_SIGN_ENUM = 0x01; /// DEST = NaN if sign(SRC) = 1 +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_MANT_SIGN_NAN: _MM_MANTISSA_SIGN_ENUM = 0x02; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_AAAA: _MM_PERM_ENUM = 0x00; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_AAAB: _MM_PERM_ENUM = 0x01; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_AAAC: _MM_PERM_ENUM = 0x02; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_AAAD: _MM_PERM_ENUM = 0x03; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_AABA: _MM_PERM_ENUM = 0x04; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_AABB: _MM_PERM_ENUM = 0x05; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_AABC: _MM_PERM_ENUM = 0x06; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_AABD: _MM_PERM_ENUM = 0x07; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_AACA: _MM_PERM_ENUM = 0x08; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_AACB: _MM_PERM_ENUM = 0x09; +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_AACC: _MM_PERM_ENUM = 0x0A; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_AACD: _MM_PERM_ENUM = 0x0B; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_AADA: _MM_PERM_ENUM = 0x0C; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_AADB: _MM_PERM_ENUM = 0x0D; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_AADC: _MM_PERM_ENUM = 0x0E; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_AADD: _MM_PERM_ENUM = 0x0F; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ABAA: _MM_PERM_ENUM = 0x10; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ABAB: _MM_PERM_ENUM = 0x11; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ABAC: _MM_PERM_ENUM = 0x12; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ABAD: _MM_PERM_ENUM = 0x13; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ABBA: _MM_PERM_ENUM = 0x14; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ABBB: _MM_PERM_ENUM = 0x15; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ABBC: _MM_PERM_ENUM = 0x16; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ABBD: _MM_PERM_ENUM = 0x17; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ABCA: _MM_PERM_ENUM = 0x18; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ABCB: _MM_PERM_ENUM = 0x19; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ABCC: _MM_PERM_ENUM = 0x1A; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ABCD: _MM_PERM_ENUM = 0x1B; +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ABDA: _MM_PERM_ENUM = 0x1C; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ABDB: _MM_PERM_ENUM = 0x1D; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ABDC: _MM_PERM_ENUM = 0x1E; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ABDD: _MM_PERM_ENUM = 0x1F; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ACAA: _MM_PERM_ENUM = 0x20; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ACAB: _MM_PERM_ENUM = 0x21; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ACAC: _MM_PERM_ENUM = 0x22; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ACAD: _MM_PERM_ENUM = 0x23; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ACBA: _MM_PERM_ENUM = 0x24; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ACBB: _MM_PERM_ENUM = 0x25; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ACBC: _MM_PERM_ENUM = 0x26; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ACBD: _MM_PERM_ENUM = 0x27; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ACCA: _MM_PERM_ENUM = 0x28; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ACCB: _MM_PERM_ENUM = 0x29; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ACCC: _MM_PERM_ENUM = 0x2A; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ACCD: _MM_PERM_ENUM = 0x2B; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ACDA: _MM_PERM_ENUM = 0x2C; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ACDB: _MM_PERM_ENUM = 0x2D; +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ACDC: _MM_PERM_ENUM = 0x2E; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ACDD: _MM_PERM_ENUM = 0x2F; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ADAA: _MM_PERM_ENUM = 0x30; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ADAB: _MM_PERM_ENUM = 0x31; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ADAC: _MM_PERM_ENUM = 0x32; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ADAD: _MM_PERM_ENUM = 0x33; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ADBA: _MM_PERM_ENUM = 0x34; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ADBB: _MM_PERM_ENUM = 0x35; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ADBC: _MM_PERM_ENUM = 0x36; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ADBD: _MM_PERM_ENUM = 0x37; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ADCA: _MM_PERM_ENUM = 0x38; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ADCB: _MM_PERM_ENUM = 0x39; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ADCC: _MM_PERM_ENUM = 0x3A; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ADCD: _MM_PERM_ENUM = 0x3B; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ADDA: _MM_PERM_ENUM = 0x3C; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ADDB: _MM_PERM_ENUM = 0x3D; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ADDC: _MM_PERM_ENUM = 0x3E; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_ADDD: _MM_PERM_ENUM = 0x3F; +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BAAA: _MM_PERM_ENUM = 0x40; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BAAB: _MM_PERM_ENUM = 0x41; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BAAC: _MM_PERM_ENUM = 0x42; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BAAD: _MM_PERM_ENUM = 0x43; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BABA: _MM_PERM_ENUM = 0x44; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BABB: _MM_PERM_ENUM = 0x45; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BABC: _MM_PERM_ENUM = 0x46; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BABD: _MM_PERM_ENUM = 0x47; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BACA: _MM_PERM_ENUM = 0x48; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BACB: _MM_PERM_ENUM = 0x49; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BACC: _MM_PERM_ENUM = 0x4A; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BACD: _MM_PERM_ENUM = 0x4B; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BADA: _MM_PERM_ENUM = 0x4C; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BADB: _MM_PERM_ENUM = 0x4D; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BADC: _MM_PERM_ENUM = 0x4E; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BADD: _MM_PERM_ENUM = 0x4F; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BBAA: _MM_PERM_ENUM = 0x50; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BBAB: _MM_PERM_ENUM = 0x51; +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BBAC: _MM_PERM_ENUM = 0x52; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BBAD: _MM_PERM_ENUM = 0x53; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BBBA: _MM_PERM_ENUM = 0x54; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BBBB: _MM_PERM_ENUM = 0x55; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BBBC: _MM_PERM_ENUM = 0x56; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BBBD: _MM_PERM_ENUM = 0x57; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BBCA: _MM_PERM_ENUM = 0x58; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BBCB: _MM_PERM_ENUM = 0x59; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BBCC: _MM_PERM_ENUM = 0x5A; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BBCD: _MM_PERM_ENUM = 0x5B; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BBDA: _MM_PERM_ENUM = 0x5C; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BBDB: _MM_PERM_ENUM = 0x5D; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BBDC: _MM_PERM_ENUM = 0x5E; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BBDD: _MM_PERM_ENUM = 0x5F; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BCAA: _MM_PERM_ENUM = 0x60; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BCAB: _MM_PERM_ENUM = 0x61; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BCAC: _MM_PERM_ENUM = 0x62; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BCAD: _MM_PERM_ENUM = 0x63; +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BCBA: _MM_PERM_ENUM = 0x64; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BCBB: _MM_PERM_ENUM = 0x65; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BCBC: _MM_PERM_ENUM = 0x66; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BCBD: _MM_PERM_ENUM = 0x67; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BCCA: _MM_PERM_ENUM = 0x68; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BCCB: _MM_PERM_ENUM = 0x69; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BCCC: _MM_PERM_ENUM = 0x6A; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BCCD: _MM_PERM_ENUM = 0x6B; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BCDA: _MM_PERM_ENUM = 0x6C; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BCDB: _MM_PERM_ENUM = 0x6D; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BCDC: _MM_PERM_ENUM = 0x6E; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BCDD: _MM_PERM_ENUM = 0x6F; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BDAA: _MM_PERM_ENUM = 0x70; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BDAB: _MM_PERM_ENUM = 0x71; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BDAC: _MM_PERM_ENUM = 0x72; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BDAD: _MM_PERM_ENUM = 0x73; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BDBA: _MM_PERM_ENUM = 0x74; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BDBB: _MM_PERM_ENUM = 0x75; +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BDBC: _MM_PERM_ENUM = 0x76; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BDBD: _MM_PERM_ENUM = 0x77; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BDCA: _MM_PERM_ENUM = 0x78; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BDCB: _MM_PERM_ENUM = 0x79; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BDCC: _MM_PERM_ENUM = 0x7A; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BDCD: _MM_PERM_ENUM = 0x7B; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BDDA: _MM_PERM_ENUM = 0x7C; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BDDB: _MM_PERM_ENUM = 0x7D; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BDDC: _MM_PERM_ENUM = 0x7E; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_BDDD: _MM_PERM_ENUM = 0x7F; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CAAA: _MM_PERM_ENUM = 0x80; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CAAB: _MM_PERM_ENUM = 0x81; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CAAC: _MM_PERM_ENUM = 0x82; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CAAD: _MM_PERM_ENUM = 0x83; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CABA: _MM_PERM_ENUM = 0x84; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CABB: _MM_PERM_ENUM = 0x85; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CABC: _MM_PERM_ENUM = 0x86; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CABD: _MM_PERM_ENUM = 0x87; +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CACA: _MM_PERM_ENUM = 0x88; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CACB: _MM_PERM_ENUM = 0x89; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CACC: _MM_PERM_ENUM = 0x8A; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CACD: _MM_PERM_ENUM = 0x8B; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CADA: _MM_PERM_ENUM = 0x8C; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CADB: _MM_PERM_ENUM = 0x8D; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CADC: _MM_PERM_ENUM = 0x8E; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CADD: _MM_PERM_ENUM = 0x8F; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CBAA: _MM_PERM_ENUM = 0x90; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CBAB: _MM_PERM_ENUM = 0x91; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CBAC: _MM_PERM_ENUM = 0x92; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CBAD: _MM_PERM_ENUM = 0x93; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CBBA: _MM_PERM_ENUM = 0x94; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CBBB: _MM_PERM_ENUM = 0x95; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CBBC: _MM_PERM_ENUM = 0x96; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CBBD: _MM_PERM_ENUM = 0x97; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CBCA: _MM_PERM_ENUM = 0x98; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CBCB: _MM_PERM_ENUM = 0x99; +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CBCC: _MM_PERM_ENUM = 0x9A; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CBCD: _MM_PERM_ENUM = 0x9B; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CBDA: _MM_PERM_ENUM = 0x9C; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CBDB: _MM_PERM_ENUM = 0x9D; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CBDC: _MM_PERM_ENUM = 0x9E; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CBDD: _MM_PERM_ENUM = 0x9F; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CCAA: _MM_PERM_ENUM = 0xA0; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CCAB: _MM_PERM_ENUM = 0xA1; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CCAC: _MM_PERM_ENUM = 0xA2; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CCAD: _MM_PERM_ENUM = 0xA3; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CCBA: _MM_PERM_ENUM = 0xA4; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CCBB: _MM_PERM_ENUM = 0xA5; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CCBC: _MM_PERM_ENUM = 0xA6; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CCBD: _MM_PERM_ENUM = 0xA7; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CCCA: _MM_PERM_ENUM = 0xA8; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CCCB: _MM_PERM_ENUM = 0xA9; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CCCC: _MM_PERM_ENUM = 0xAA; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CCCD: _MM_PERM_ENUM = 0xAB; +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CCDA: _MM_PERM_ENUM = 0xAC; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CCDB: _MM_PERM_ENUM = 0xAD; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CCDC: _MM_PERM_ENUM = 0xAE; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CCDD: _MM_PERM_ENUM = 0xAF; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CDAA: _MM_PERM_ENUM = 0xB0; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CDAB: _MM_PERM_ENUM = 0xB1; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CDAC: _MM_PERM_ENUM = 0xB2; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CDAD: _MM_PERM_ENUM = 0xB3; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CDBA: _MM_PERM_ENUM = 0xB4; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CDBB: _MM_PERM_ENUM = 0xB5; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CDBC: _MM_PERM_ENUM = 0xB6; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CDBD: _MM_PERM_ENUM = 0xB7; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CDCA: _MM_PERM_ENUM = 0xB8; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CDCB: _MM_PERM_ENUM = 0xB9; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CDCC: _MM_PERM_ENUM = 0xBA; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CDCD: _MM_PERM_ENUM = 0xBB; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CDDA: _MM_PERM_ENUM = 0xBC; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CDDB: _MM_PERM_ENUM = 0xBD; +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CDDC: _MM_PERM_ENUM = 0xBE; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_CDDD: _MM_PERM_ENUM = 0xBF; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DAAA: _MM_PERM_ENUM = 0xC0; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DAAB: _MM_PERM_ENUM = 0xC1; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DAAC: _MM_PERM_ENUM = 0xC2; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DAAD: _MM_PERM_ENUM = 0xC3; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DABA: _MM_PERM_ENUM = 0xC4; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DABB: _MM_PERM_ENUM = 0xC5; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DABC: _MM_PERM_ENUM = 0xC6; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DABD: _MM_PERM_ENUM = 0xC7; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DACA: _MM_PERM_ENUM = 0xC8; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DACB: _MM_PERM_ENUM = 0xC9; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DACC: _MM_PERM_ENUM = 0xCA; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DACD: _MM_PERM_ENUM = 0xCB; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DADA: _MM_PERM_ENUM = 0xCC; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DADB: _MM_PERM_ENUM = 0xCD; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DADC: _MM_PERM_ENUM = 0xCE; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DADD: _MM_PERM_ENUM = 0xCF; +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DBAA: _MM_PERM_ENUM = 0xD0; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DBAB: _MM_PERM_ENUM = 0xD1; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DBAC: _MM_PERM_ENUM = 0xD2; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DBAD: _MM_PERM_ENUM = 0xD3; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DBBA: _MM_PERM_ENUM = 0xD4; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DBBB: _MM_PERM_ENUM = 0xD5; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DBBC: _MM_PERM_ENUM = 0xD6; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DBBD: _MM_PERM_ENUM = 0xD7; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DBCA: _MM_PERM_ENUM = 0xD8; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DBCB: _MM_PERM_ENUM = 0xD9; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DBCC: _MM_PERM_ENUM = 0xDA; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DBCD: _MM_PERM_ENUM = 0xDB; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DBDA: _MM_PERM_ENUM = 0xDC; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DBDB: _MM_PERM_ENUM = 0xDD; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DBDC: _MM_PERM_ENUM = 0xDE; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DBDD: _MM_PERM_ENUM = 0xDF; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DCAA: _MM_PERM_ENUM = 0xE0; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DCAB: _MM_PERM_ENUM = 0xE1; +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DCAC: _MM_PERM_ENUM = 0xE2; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DCAD: _MM_PERM_ENUM = 0xE3; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DCBA: _MM_PERM_ENUM = 0xE4; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DCBB: _MM_PERM_ENUM = 0xE5; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DCBC: _MM_PERM_ENUM = 0xE6; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DCBD: _MM_PERM_ENUM = 0xE7; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DCCA: _MM_PERM_ENUM = 0xE8; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DCCB: _MM_PERM_ENUM = 0xE9; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DCCC: _MM_PERM_ENUM = 0xEA; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DCCD: _MM_PERM_ENUM = 0xEB; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DCDA: _MM_PERM_ENUM = 0xEC; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DCDB: _MM_PERM_ENUM = 0xED; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DCDC: _MM_PERM_ENUM = 0xEE; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DCDD: _MM_PERM_ENUM = 0xEF; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DDAA: _MM_PERM_ENUM = 0xF0; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DDAB: _MM_PERM_ENUM = 0xF1; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DDAC: _MM_PERM_ENUM = 0xF2; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DDAD: _MM_PERM_ENUM = 0xF3; +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DDBA: _MM_PERM_ENUM = 0xF4; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DDBB: _MM_PERM_ENUM = 0xF5; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DDBC: _MM_PERM_ENUM = 0xF6; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DDBD: _MM_PERM_ENUM = 0xF7; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DDCA: _MM_PERM_ENUM = 0xF8; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DDCB: _MM_PERM_ENUM = 0xF9; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DDCC: _MM_PERM_ENUM = 0xFA; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DDCD: _MM_PERM_ENUM = 0xFB; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DDDA: _MM_PERM_ENUM = 0xFC; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DDDB: _MM_PERM_ENUM = 0xFD; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DDDC: _MM_PERM_ENUM = 0xFE; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub const _MM_PERM_DDDD: _MM_PERM_ENUM = 0xFF; #[allow(improper_ctypes)] diff --git a/crates/core_arch/src/x86/avx512ifma.rs b/crates/core_arch/src/x86/avx512ifma.rs index 128f0db25d..332d2316d5 100644 --- a/crates/core_arch/src/x86/avx512ifma.rs +++ b/crates/core_arch/src/x86/avx512ifma.rs @@ -12,6 +12,7 @@ use stdarch_test::assert_instr; /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#avx512techs=AVX512IFMA52&expand=3488) #[inline] #[target_feature(enable = "avx512ifma")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52huq))] pub unsafe fn _mm512_madd52hi_epu64(a: __m512i, b: __m512i, c: __m512i) -> __m512i { vpmadd52huq_512(a, b, c) @@ -26,6 
+27,7 @@ pub unsafe fn _mm512_madd52hi_epu64(a: __m512i, b: __m512i, c: __m512i) -> __m51 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=3497&avx512techs=AVX512IFMA52) #[inline] #[target_feature(enable = "avx512ifma")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52luq))] pub unsafe fn _mm512_madd52lo_epu64(a: __m512i, b: __m512i, c: __m512i) -> __m512i { vpmadd52luq_512(a, b, c) @@ -40,6 +42,7 @@ pub unsafe fn _mm512_madd52lo_epu64(a: __m512i, b: __m512i, c: __m512i) -> __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=vpmadd52&avx512techs=AVX512IFMA52,AVX512VL&expand=3485) #[inline] #[target_feature(enable = "avx512ifma,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52huq))] pub unsafe fn _mm256_madd52hi_epu64(a: __m256i, b: __m256i, c: __m256i) -> __m256i { vpmadd52huq_256(a, b, c) @@ -54,6 +57,7 @@ pub unsafe fn _mm256_madd52hi_epu64(a: __m256i, b: __m256i, c: __m256i) -> __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=vpmadd52&avx512techs=AVX512IFMA52,AVX512VL&expand=3494) #[inline] #[target_feature(enable = "avx512ifma,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52luq))] pub unsafe fn _mm256_madd52lo_epu64(a: __m256i, b: __m256i, c: __m256i) -> __m256i { vpmadd52luq_256(a, b, c) @@ -68,6 +72,7 @@ pub unsafe fn _mm256_madd52lo_epu64(a: __m256i, b: __m256i, c: __m256i) -> __m25 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=3488,3482&text=vpmadd52&avx512techs=AVX512IFMA52,AVX512VL) #[inline] #[target_feature(enable = "avx512ifma,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52huq))] pub unsafe 
fn _mm_madd52hi_epu64(a: __m128i, b: __m128i, c: __m128i) -> __m128i { vpmadd52huq_128(a, b, c) @@ -82,6 +87,7 @@ pub unsafe fn _mm_madd52hi_epu64(a: __m128i, b: __m128i, c: __m128i) -> __m128i /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=3488,3491&text=vpmadd52&avx512techs=AVX512IFMA52,AVX512VL) #[inline] #[target_feature(enable = "avx512ifma,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52luq))] pub unsafe fn _mm_madd52lo_epu64(a: __m128i, b: __m128i, c: __m128i) -> __m128i { vpmadd52luq_128(a, b, c) diff --git a/crates/core_arch/src/x86/avx512vbmi.rs b/crates/core_arch/src/x86/avx512vbmi.rs index cd3800d383..0bd3f26219 100644 --- a/crates/core_arch/src/x86/avx512vbmi.rs +++ b/crates/core_arch/src/x86/avx512vbmi.rs @@ -8,6 +8,7 @@ use stdarch_test::assert_instr; /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutex2var_epi8&expand=4262) #[inline] #[target_feature(enable = "avx512vbmi")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //should be vpermi2b pub unsafe fn _mm512_permutex2var_epi8(a: __m512i, idx: __m512i, b: __m512i) -> __m512i { transmute(vpermi2b(a.as_i8x64(), idx.as_i8x64(), b.as_i8x64())) @@ -18,6 +19,7 @@ pub unsafe fn _mm512_permutex2var_epi8(a: __m512i, idx: __m512i, b: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutex2var_epi8&expand=4259) #[inline] #[target_feature(enable = "avx512vbmi")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2b))] pub unsafe fn _mm512_mask_permutex2var_epi8( a: __m512i, @@ -34,6 +36,7 @@ pub unsafe fn _mm512_mask_permutex2var_epi8( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutex2var_epi8&expand=4261) #[inline] #[target_feature(enable = "avx512vbmi")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //should be vpermi2b pub unsafe fn _mm512_maskz_permutex2var_epi8( k: __mmask64, @@ -51,6 +54,7 @@ pub unsafe fn _mm512_maskz_permutex2var_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask2_permutex2var_epi8&expand=4260) #[inline] #[target_feature(enable = "avx512vbmi")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermi2b))] pub unsafe fn _mm512_mask2_permutex2var_epi8( a: __m512i, @@ -67,6 +71,7 @@ pub unsafe fn _mm512_mask2_permutex2var_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutex2var_epi8&expand=4258) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //should be vpermi2b pub unsafe fn _mm256_permutex2var_epi8(a: __m256i, idx: __m256i, b: __m256i) -> __m256i { transmute(vpermi2b256(a.as_i8x32(), idx.as_i8x32(), b.as_i8x32())) @@ -77,6 +82,7 @@ pub unsafe fn _mm256_permutex2var_epi8(a: __m256i, idx: __m256i, b: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutex2var_epi8&expand=4255) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2b))] pub unsafe fn _mm256_mask_permutex2var_epi8( a: __m256i, @@ -93,6 +99,7 @@ pub unsafe fn _mm256_mask_permutex2var_epi8( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutex2var_epi8&expand=4257) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //should be vpermi2b pub unsafe fn _mm256_maskz_permutex2var_epi8( k: __mmask32, @@ -110,6 +117,7 @@ pub unsafe fn _mm256_maskz_permutex2var_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask2_permutex2var_epi8&expand=4256) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermi2b))] pub unsafe fn _mm256_mask2_permutex2var_epi8( a: __m256i, @@ -126,6 +134,7 @@ pub unsafe fn _mm256_mask2_permutex2var_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutex2var_epi8&expand=4254) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //should be vpermi2b pub unsafe fn _mm_permutex2var_epi8(a: __m128i, idx: __m128i, b: __m128i) -> __m128i { transmute(vpermi2b128(a.as_i8x16(), idx.as_i8x16(), b.as_i8x16())) @@ -136,6 +145,7 @@ pub unsafe fn _mm_permutex2var_epi8(a: __m128i, idx: __m128i, b: __m128i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutex2var_epi8&expand=4251) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermt2b))] pub unsafe fn _mm_mask_permutex2var_epi8( a: __m128i, @@ -152,6 +162,7 @@ pub unsafe fn _mm_mask_permutex2var_epi8( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutex2var_epi8&expand=4253) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vperm))] //should be vpermi2b pub unsafe fn _mm_maskz_permutex2var_epi8( k: __mmask16, @@ -169,6 +180,7 @@ pub unsafe fn _mm_maskz_permutex2var_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask2_permutex2var_epi8&expand=4252) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermi2b))] pub unsafe fn _mm_mask2_permutex2var_epi8( a: __m128i, @@ -185,6 +197,7 @@ pub unsafe fn _mm_mask2_permutex2var_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutexvar_epi8&expand=4316) #[inline] #[target_feature(enable = "avx512vbmi")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermb))] pub unsafe fn _mm512_permutexvar_epi8(idx: __m512i, a: __m512i) -> __m512i { transmute(vpermb(a.as_i8x64(), idx.as_i8x64())) @@ -195,6 +208,7 @@ pub unsafe fn _mm512_permutexvar_epi8(idx: __m512i, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutexvar_epi8&expand=4314) #[inline] #[target_feature(enable = "avx512vbmi")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermb))] pub unsafe fn _mm512_mask_permutexvar_epi8( src: __m512i, @@ -211,6 +225,7 @@ pub unsafe fn _mm512_mask_permutexvar_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutexvar_epi8&expand=4315) #[inline] #[target_feature(enable = 
"avx512vbmi")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermb))] pub unsafe fn _mm512_maskz_permutexvar_epi8(k: __mmask64, idx: __m512i, a: __m512i) -> __m512i { let permute = _mm512_permutexvar_epi8(idx, a).as_i8x64(); @@ -223,6 +238,7 @@ pub unsafe fn _mm512_maskz_permutexvar_epi8(k: __mmask64, idx: __m512i, a: __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutexvar_epi8&expand=4313) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermb))] pub unsafe fn _mm256_permutexvar_epi8(idx: __m256i, a: __m256i) -> __m256i { transmute(vpermb256(a.as_i8x32(), idx.as_i8x32())) @@ -233,6 +249,7 @@ pub unsafe fn _mm256_permutexvar_epi8(idx: __m256i, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutexvar_epi8&expand=4311) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermb))] pub unsafe fn _mm256_mask_permutexvar_epi8( src: __m256i, @@ -249,6 +266,7 @@ pub unsafe fn _mm256_mask_permutexvar_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutexvar_epi8&expand=4312) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermb))] pub unsafe fn _mm256_maskz_permutexvar_epi8(k: __mmask32, idx: __m256i, a: __m256i) -> __m256i { let permute = _mm256_permutexvar_epi8(idx, a).as_i8x32(); @@ -261,6 +279,7 @@ pub unsafe fn _mm256_maskz_permutexvar_epi8(k: __mmask32, idx: __m256i, a: __m25 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutexvar_epi8&expand=4310) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermb))] pub unsafe fn _mm_permutexvar_epi8(idx: __m128i, a: __m128i) -> __m128i { transmute(vpermb128(a.as_i8x16(), idx.as_i8x16())) @@ -271,6 +290,7 @@ pub unsafe fn _mm_permutexvar_epi8(idx: __m128i, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutexvar_epi8&expand=4308) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermb))] pub unsafe fn _mm_mask_permutexvar_epi8( src: __m128i, @@ -287,6 +307,7 @@ pub unsafe fn _mm_mask_permutexvar_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutexvar_epi8&expand=4309) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpermb))] pub unsafe fn _mm_maskz_permutexvar_epi8(k: __mmask16, idx: __m128i, a: __m128i) -> __m128i { let permute = _mm_permutexvar_epi8(idx, a).as_i8x16(); @@ -299,6 +320,7 @@ pub unsafe fn _mm_maskz_permutexvar_epi8(k: __mmask16, idx: __m128i, a: __m128i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_multishift_epi64_epi8&expand=4026) #[inline] #[target_feature(enable = "avx512vbmi")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmultishiftqb))] pub unsafe fn _mm512_multishift_epi64_epi8(a: __m512i, b: __m512i) -> __m512i { transmute(vpmultishiftqb(a.as_i8x64(), b.as_i8x64())) @@ -309,6 +331,7 @@ pub unsafe fn 
_mm512_multishift_epi64_epi8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_multishift_epi64_epi8&expand=4024) #[inline] #[target_feature(enable = "avx512vbmi")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmultishiftqb))] pub unsafe fn _mm512_mask_multishift_epi64_epi8( src: __m512i, @@ -325,6 +348,7 @@ pub unsafe fn _mm512_mask_multishift_epi64_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_multishift_epi64_epi8&expand=4025) #[inline] #[target_feature(enable = "avx512vbmi")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmultishiftqb))] pub unsafe fn _mm512_maskz_multishift_epi64_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let multishift = _mm512_multishift_epi64_epi8(a, b).as_i8x64(); @@ -337,6 +361,7 @@ pub unsafe fn _mm512_maskz_multishift_epi64_epi8(k: __mmask64, a: __m512i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_multishift_epi64_epi8&expand=4023) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmultishiftqb))] pub unsafe fn _mm256_multishift_epi64_epi8(a: __m256i, b: __m256i) -> __m256i { transmute(vpmultishiftqb256(a.as_i8x32(), b.as_i8x32())) @@ -347,6 +372,7 @@ pub unsafe fn _mm256_multishift_epi64_epi8(a: __m256i, b: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_multishift_epi64_epi8&expand=4021) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmultishiftqb))] pub unsafe fn 
_mm256_mask_multishift_epi64_epi8( src: __m256i, @@ -363,6 +389,7 @@ pub unsafe fn _mm256_mask_multishift_epi64_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_multishift_epi64_epi8&expand=4022) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmultishiftqb))] pub unsafe fn _mm256_maskz_multishift_epi64_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let multishift = _mm256_multishift_epi64_epi8(a, b).as_i8x32(); @@ -375,6 +402,7 @@ pub unsafe fn _mm256_maskz_multishift_epi64_epi8(k: __mmask32, a: __m256i, b: __ /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/IntrinsicsGuide/#text=_mm_multishift_epi64_epi8&expand=4020) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmultishiftqb))] pub unsafe fn _mm_multishift_epi64_epi8(a: __m128i, b: __m128i) -> __m128i { transmute(vpmultishiftqb128(a.as_i8x16(), b.as_i8x16())) @@ -385,6 +413,7 @@ pub unsafe fn _mm_multishift_epi64_epi8(a: __m128i, b: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_multishift_epi64_epi8&expand=4018) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmultishiftqb))] pub unsafe fn _mm_mask_multishift_epi64_epi8( src: __m128i, @@ -401,6 +430,7 @@ pub unsafe fn _mm_mask_multishift_epi64_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_multishift_epi64_epi8&expand=4019) #[inline] #[target_feature(enable = "avx512vbmi,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vpmultishiftqb))] pub unsafe fn _mm_maskz_multishift_epi64_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let multishift = _mm_multishift_epi64_epi8(a, b).as_i8x16(); diff --git a/crates/core_arch/src/x86/avx512vbmi2.rs b/crates/core_arch/src/x86/avx512vbmi2.rs index 404443e9e6..4068633251 100644 --- a/crates/core_arch/src/x86/avx512vbmi2.rs +++ b/crates/core_arch/src/x86/avx512vbmi2.rs @@ -11,6 +11,7 @@ use stdarch_test::assert_instr; /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expandloadu_epi16) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_expandloadu_epi16( src: __m512i, k: __mmask32, @@ -32,6 +33,7 @@ pub unsafe fn _mm512_mask_expandloadu_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expandloadu_epi16) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_expandloadu_epi16(k: __mmask32, mem_addr: *const i16) -> __m512i { let mut dst: __m512i; asm!( @@ -49,6 +51,7 @@ pub unsafe fn _mm512_maskz_expandloadu_epi16(k: __mmask32, mem_addr: *const i16) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expandloadu_epi16) #[inline] #[target_feature(enable = "avx512f,avx512vbmi2,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_expandloadu_epi16( src: __m256i, k: __mmask16, @@ -70,6 +73,7 @@ pub unsafe fn _mm256_mask_expandloadu_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expandloadu_epi16) #[inline] #[target_feature(enable = "avx512f,avx512vbmi2,avx512vl,avx")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_expandloadu_epi16(k: __mmask16, mem_addr: *const i16) -> __m256i { let mut dst: __m256i; asm!( @@ -87,6 +91,7 @@ pub unsafe fn _mm256_maskz_expandloadu_epi16(k: __mmask16, mem_addr: *const i16) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expandloadu_epi16) #[inline] #[target_feature(enable = "avx512f,avx512vbmi2,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_expandloadu_epi16( src: __m128i, k: __mmask8, @@ -108,6 +113,7 @@ pub unsafe fn _mm_mask_expandloadu_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expandloadu_epi16) #[inline] #[target_feature(enable = "avx512f,avx512vbmi2,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_maskz_expandloadu_epi16(k: __mmask8, mem_addr: *const i16) -> __m128i { let mut dst: __m128i; asm!( @@ -125,6 +131,7 @@ pub unsafe fn _mm_maskz_expandloadu_epi16(k: __mmask8, mem_addr: *const i16) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expandloadu_epi8) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_mask_expandloadu_epi8( src: __m512i, k: __mmask64, @@ -146,6 +153,7 @@ pub unsafe fn _mm512_mask_expandloadu_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expandloadu_epi8) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm512_maskz_expandloadu_epi8(k: __mmask64, mem_addr: *const i8) -> __m512i { let mut dst: __m512i; asm!( @@ -163,6 +171,7 @@ pub unsafe fn 
_mm512_maskz_expandloadu_epi8(k: __mmask64, mem_addr: *const i8) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expandloadu_epi8) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vbmi2,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_mask_expandloadu_epi8( src: __m256i, k: __mmask32, @@ -184,6 +193,7 @@ pub unsafe fn _mm256_mask_expandloadu_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expandloadu_epi8) #[inline] #[target_feature(enable = "avx512f,avx512bw,avx512vbmi2,avx512vl,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm256_maskz_expandloadu_epi8(k: __mmask32, mem_addr: *const i8) -> __m256i { let mut dst: __m256i; asm!( @@ -201,6 +211,7 @@ pub unsafe fn _mm256_maskz_expandloadu_epi8(k: __mmask32, mem_addr: *const i8) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expandloadu_epi8) #[inline] #[target_feature(enable = "avx512f,avx512vbmi2,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_mask_expandloadu_epi8( src: __m128i, k: __mmask16, @@ -222,6 +233,7 @@ pub unsafe fn _mm_mask_expandloadu_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expandloadu_epi8) #[inline] #[target_feature(enable = "avx512f,avx512vbmi2,avx512vl,avx,sse")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub unsafe fn _mm_maskz_expandloadu_epi8(k: __mmask16, mem_addr: *const i8) -> __m128i { let mut dst: __m128i; asm!( @@ -239,6 +251,7 @@ pub unsafe fn _mm_maskz_expandloadu_epi8(k: __mmask16, mem_addr: *const i8) -> _ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compressstoreu_epi16) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressw))] pub unsafe fn _mm512_mask_compressstoreu_epi16(base_addr: *mut u8, k: __mmask32, a: __m512i) { vcompressstorew(base_addr as *mut _, a.as_i16x32(), k) @@ -249,6 +262,7 @@ pub unsafe fn _mm512_mask_compressstoreu_epi16(base_addr: *mut u8, k: __mmask32, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compressstoreu_epi16) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressw))] pub unsafe fn _mm256_mask_compressstoreu_epi16(base_addr: *mut u8, k: __mmask16, a: __m256i) { vcompressstorew256(base_addr as *mut _, a.as_i16x16(), k) @@ -259,6 +273,7 @@ pub unsafe fn _mm256_mask_compressstoreu_epi16(base_addr: *mut u8, k: __mmask16, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compressstoreu_epi16) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressw))] pub unsafe fn _mm_mask_compressstoreu_epi16(base_addr: *mut u8, k: __mmask8, a: __m128i) { vcompressstorew128(base_addr as *mut _, a.as_i16x8(), k) @@ -269,6 +284,7 @@ pub unsafe fn _mm_mask_compressstoreu_epi16(base_addr: *mut u8, k: __mmask8, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compressstoreu_epi8) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressb))] pub unsafe fn 
_mm512_mask_compressstoreu_epi8(base_addr: *mut u8, k: __mmask64, a: __m512i) { vcompressstoreb(base_addr as *mut _, a.as_i8x64(), k) @@ -279,6 +295,7 @@ pub unsafe fn _mm512_mask_compressstoreu_epi8(base_addr: *mut u8, k: __mmask64, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compressstoreu_epi8) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressb))] pub unsafe fn _mm256_mask_compressstoreu_epi8(base_addr: *mut u8, k: __mmask32, a: __m256i) { vcompressstoreb256(base_addr as *mut _, a.as_i8x32(), k) @@ -289,6 +306,7 @@ pub unsafe fn _mm256_mask_compressstoreu_epi8(base_addr: *mut u8, k: __mmask32, /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compressstoreu_epi8) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressb))] pub unsafe fn _mm_mask_compressstoreu_epi8(base_addr: *mut u8, k: __mmask16, a: __m128i) { vcompressstoreb128(base_addr as *mut _, a.as_i8x16(), k) @@ -299,6 +317,7 @@ pub unsafe fn _mm_mask_compressstoreu_epi8(base_addr: *mut u8, k: __mmask16, a: /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compress_epi16&expand=1192) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressw))] pub unsafe fn _mm512_mask_compress_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m512i { transmute(vpcompressw(a.as_i16x32(), src.as_i16x32(), k)) @@ -309,6 +328,7 @@ pub unsafe fn _mm512_mask_compress_epi16(src: __m512i, k: __mmask32, a: __m512i) /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_compress_epi16&expand=1193) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressw))] pub unsafe fn _mm512_maskz_compress_epi16(k: __mmask32, a: __m512i) -> __m512i { transmute(vpcompressw( @@ -323,6 +343,7 @@ pub unsafe fn _mm512_maskz_compress_epi16(k: __mmask32, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compress_epi16&expand=1190) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressw))] pub unsafe fn _mm256_mask_compress_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i { transmute(vpcompressw256(a.as_i16x16(), src.as_i16x16(), k)) @@ -333,6 +354,7 @@ pub unsafe fn _mm256_mask_compress_epi16(src: __m256i, k: __mmask16, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_compress_epi16&expand=1191) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressw))] pub unsafe fn _mm256_maskz_compress_epi16(k: __mmask16, a: __m256i) -> __m256i { transmute(vpcompressw256( @@ -347,6 +369,7 @@ pub unsafe fn _mm256_maskz_compress_epi16(k: __mmask16, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compress_epi16&expand=1188) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressw))] pub unsafe fn _mm_mask_compress_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { 
transmute(vpcompressw128(a.as_i16x8(), src.as_i16x8(), k)) @@ -357,6 +380,7 @@ pub unsafe fn _mm_mask_compress_epi16(src: __m128i, k: __mmask8, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_compress_epi16&expand=1189) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressw))] pub unsafe fn _mm_maskz_compress_epi16(k: __mmask8, a: __m128i) -> __m128i { transmute(vpcompressw128( @@ -371,6 +395,7 @@ pub unsafe fn _mm_maskz_compress_epi16(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compress_epi8&expand=1210) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressb))] pub unsafe fn _mm512_mask_compress_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __m512i { transmute(vpcompressb(a.as_i8x64(), src.as_i8x64(), k)) @@ -381,6 +406,7 @@ pub unsafe fn _mm512_mask_compress_epi8(src: __m512i, k: __mmask64, a: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_compress_epi8&expand=1211) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressb))] pub unsafe fn _mm512_maskz_compress_epi8(k: __mmask64, a: __m512i) -> __m512i { transmute(vpcompressb( @@ -395,6 +421,7 @@ pub unsafe fn _mm512_maskz_compress_epi8(k: __mmask64, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compress_epi8&expand=1208) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = 
"111137")] #[cfg_attr(test, assert_instr(vpcompressb))] pub unsafe fn _mm256_mask_compress_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __m256i { transmute(vpcompressb256(a.as_i8x32(), src.as_i8x32(), k)) @@ -405,6 +432,7 @@ pub unsafe fn _mm256_mask_compress_epi8(src: __m256i, k: __mmask32, a: __m256i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_compress_epi8&expand=1209) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressb))] pub unsafe fn _mm256_maskz_compress_epi8(k: __mmask32, a: __m256i) -> __m256i { transmute(vpcompressb256( @@ -419,6 +447,7 @@ pub unsafe fn _mm256_maskz_compress_epi8(k: __mmask32, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compress_epi8&expand=1206) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressb))] pub unsafe fn _mm_mask_compress_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m128i { transmute(vpcompressb128(a.as_i8x16(), src.as_i8x16(), k)) @@ -429,6 +458,7 @@ pub unsafe fn _mm_mask_compress_epi8(src: __m128i, k: __mmask16, a: __m128i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_compress_epi8&expand=1207) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpcompressb))] pub unsafe fn _mm_maskz_compress_epi8(k: __mmask16, a: __m128i) -> __m128i { transmute(vpcompressb128( @@ -443,6 +473,7 @@ pub unsafe fn _mm_maskz_compress_epi8(k: __mmask16, a: __m128i) -> __m128i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expand_epi16&expand=2310) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandw))] pub unsafe fn _mm512_mask_expand_epi16(src: __m512i, k: __mmask32, a: __m512i) -> __m512i { transmute(vpexpandw(a.as_i16x32(), src.as_i16x32(), k)) @@ -453,6 +484,7 @@ pub unsafe fn _mm512_mask_expand_epi16(src: __m512i, k: __mmask32, a: __m512i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expand_epi16&expand=2311) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandw))] pub unsafe fn _mm512_maskz_expand_epi16(k: __mmask32, a: __m512i) -> __m512i { transmute(vpexpandw( @@ -467,6 +499,7 @@ pub unsafe fn _mm512_maskz_expand_epi16(k: __mmask32, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expand_epi16&expand=2308) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandw))] pub unsafe fn _mm256_mask_expand_epi16(src: __m256i, k: __mmask16, a: __m256i) -> __m256i { transmute(vpexpandw256(a.as_i16x16(), src.as_i16x16(), k)) @@ -477,6 +510,7 @@ pub unsafe fn _mm256_mask_expand_epi16(src: __m256i, k: __mmask16, a: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expand_epi16&expand=2309) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandw))] pub unsafe fn _mm256_maskz_expand_epi16(k: __mmask16, a: __m256i) -> __m256i { 
transmute(vpexpandw256( @@ -491,6 +525,7 @@ pub unsafe fn _mm256_maskz_expand_epi16(k: __mmask16, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expand_epi16&expand=2306) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandw))] pub unsafe fn _mm_mask_expand_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(vpexpandw128(a.as_i16x8(), src.as_i16x8(), k)) @@ -501,6 +536,7 @@ pub unsafe fn _mm_mask_expand_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expand_epi16&expand=2307) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandw))] pub unsafe fn _mm_maskz_expand_epi16(k: __mmask8, a: __m128i) -> __m128i { transmute(vpexpandw128( @@ -515,6 +551,7 @@ pub unsafe fn _mm_maskz_expand_epi16(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expand_epi8&expand=2328) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandb))] pub unsafe fn _mm512_mask_expand_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __m512i { transmute(vpexpandb(a.as_i8x64(), src.as_i8x64(), k)) @@ -525,6 +562,7 @@ pub unsafe fn _mm512_mask_expand_epi8(src: __m512i, k: __mmask64, a: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expand_epi8&expand=2329) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vpexpandb))] pub unsafe fn _mm512_maskz_expand_epi8(k: __mmask64, a: __m512i) -> __m512i { transmute(vpexpandb( @@ -539,6 +577,7 @@ pub unsafe fn _mm512_maskz_expand_epi8(k: __mmask64, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expand_epi8&expand=2326) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandb))] pub unsafe fn _mm256_mask_expand_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __m256i { transmute(vpexpandb256(a.as_i8x32(), src.as_i8x32(), k)) @@ -549,6 +588,7 @@ pub unsafe fn _mm256_mask_expand_epi8(src: __m256i, k: __mmask32, a: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expand_epi8&expand=2327) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandb))] pub unsafe fn _mm256_maskz_expand_epi8(k: __mmask32, a: __m256i) -> __m256i { transmute(vpexpandb256( @@ -563,6 +603,7 @@ pub unsafe fn _mm256_maskz_expand_epi8(k: __mmask32, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expand_epi8&expand=2324) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandb))] pub unsafe fn _mm_mask_expand_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m128i { transmute(vpexpandb128(a.as_i8x16(), src.as_i8x16(), k)) @@ -573,6 +614,7 @@ pub unsafe fn _mm_mask_expand_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __ /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expand_epi8&expand=2325) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpexpandb))] pub unsafe fn _mm_maskz_expand_epi8(k: __mmask16, a: __m128i) -> __m128i { transmute(vpexpandb128( @@ -587,6 +629,7 @@ pub unsafe fn _mm_maskz_expand_epi8(k: __mmask16, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shldv_epi64&expand=5087) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvq))] pub unsafe fn _mm512_shldv_epi64(a: __m512i, b: __m512i, c: __m512i) -> __m512i { transmute(vpshldvq(a.as_i64x8(), b.as_i64x8(), c.as_i64x8())) @@ -597,6 +640,7 @@ pub unsafe fn _mm512_shldv_epi64(a: __m512i, b: __m512i, c: __m512i) -> __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shldv_epi64&expand=5085) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvq))] pub unsafe fn _mm512_mask_shldv_epi64(a: __m512i, k: __mmask8, b: __m512i, c: __m512i) -> __m512i { let shf = _mm512_shldv_epi64(a, b, c).as_i64x8(); @@ -608,6 +652,7 @@ pub unsafe fn _mm512_mask_shldv_epi64(a: __m512i, k: __mmask8, b: __m512i, c: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shldv_epi64&expand=5086) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvq))] pub unsafe fn _mm512_maskz_shldv_epi64(k: __mmask8, a: __m512i, b: __m512i, c: __m512i) -> __m512i { let shf = 
_mm512_shldv_epi64(a, b, c).as_i64x8(); @@ -620,6 +665,7 @@ pub unsafe fn _mm512_maskz_shldv_epi64(k: __mmask8, a: __m512i, b: __m512i, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shldv_epi64&expand=5084) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvq))] pub unsafe fn _mm256_shldv_epi64(a: __m256i, b: __m256i, c: __m256i) -> __m256i { transmute(vpshldvq256(a.as_i64x4(), b.as_i64x4(), c.as_i64x4())) @@ -630,6 +676,7 @@ pub unsafe fn _mm256_shldv_epi64(a: __m256i, b: __m256i, c: __m256i) -> __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shldv_epi64&expand=5082) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvq))] pub unsafe fn _mm256_mask_shldv_epi64(a: __m256i, k: __mmask8, b: __m256i, c: __m256i) -> __m256i { let shf = _mm256_shldv_epi64(a, b, c).as_i64x4(); @@ -641,6 +688,7 @@ pub unsafe fn _mm256_mask_shldv_epi64(a: __m256i, k: __mmask8, b: __m256i, c: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shldv_epi64&expand=5083) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvq))] pub unsafe fn _mm256_maskz_shldv_epi64(k: __mmask8, a: __m256i, b: __m256i, c: __m256i) -> __m256i { let shf = _mm256_shldv_epi64(a, b, c).as_i64x4(); @@ -653,6 +701,7 @@ pub unsafe fn _mm256_maskz_shldv_epi64(k: __mmask8, a: __m256i, b: __m256i, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shldv_epi64&expand=5081) #[inline] #[target_feature(enable = 
"avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvq))] pub unsafe fn _mm_shldv_epi64(a: __m128i, b: __m128i, c: __m128i) -> __m128i { transmute(vpshldvq128(a.as_i64x2(), b.as_i64x2(), c.as_i64x2())) @@ -663,6 +712,7 @@ pub unsafe fn _mm_shldv_epi64(a: __m128i, b: __m128i, c: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shldv_epi64&expand=5079) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvq))] pub unsafe fn _mm_mask_shldv_epi64(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { let shf = _mm_shldv_epi64(a, b, c).as_i64x2(); @@ -674,6 +724,7 @@ pub unsafe fn _mm_mask_shldv_epi64(a: __m128i, k: __mmask8, b: __m128i, c: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shldv_epi64&expand=5080) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvq))] pub unsafe fn _mm_maskz_shldv_epi64(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { let shf = _mm_shldv_epi64(a, b, c).as_i64x2(); @@ -686,6 +737,7 @@ pub unsafe fn _mm_maskz_shldv_epi64(k: __mmask8, a: __m128i, b: __m128i, c: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shldv_epi32&expand=5078) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvd))] pub unsafe fn _mm512_shldv_epi32(a: __m512i, b: __m512i, c: __m512i) -> __m512i { transmute(vpshldvd(a.as_i32x16(), b.as_i32x16(), c.as_i32x16())) @@ -696,6 +748,7 @@ pub unsafe fn _mm512_shldv_epi32(a: __m512i, b: __m512i, c: 
__m512i) -> __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shldv_epi32&expand=5076) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvd))] pub unsafe fn _mm512_mask_shldv_epi32(a: __m512i, k: __mmask16, b: __m512i, c: __m512i) -> __m512i { let shf = _mm512_shldv_epi32(a, b, c).as_i32x16(); @@ -707,6 +760,7 @@ pub unsafe fn _mm512_mask_shldv_epi32(a: __m512i, k: __mmask16, b: __m512i, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shldv_epi32&expand=5077) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvd))] pub unsafe fn _mm512_maskz_shldv_epi32( k: __mmask16, @@ -724,6 +778,7 @@ pub unsafe fn _mm512_maskz_shldv_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shldv_epi32&expand=5075) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvd))] pub unsafe fn _mm256_shldv_epi32(a: __m256i, b: __m256i, c: __m256i) -> __m256i { transmute(vpshldvd256(a.as_i32x8(), b.as_i32x8(), c.as_i32x8())) @@ -734,6 +789,7 @@ pub unsafe fn _mm256_shldv_epi32(a: __m256i, b: __m256i, c: __m256i) -> __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shldv_epi32&expand=5073) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvd))] pub unsafe fn _mm256_mask_shldv_epi32(a: __m256i, k: __mmask8, b: __m256i, c: __m256i) -> __m256i { let shf = _mm256_shldv_epi32(a, b, c).as_i32x8(); 
@@ -745,6 +801,7 @@ pub unsafe fn _mm256_mask_shldv_epi32(a: __m256i, k: __mmask8, b: __m256i, c: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shldv_epi32&expand=5074) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvd))] pub unsafe fn _mm256_maskz_shldv_epi32(k: __mmask8, a: __m256i, b: __m256i, c: __m256i) -> __m256i { let shf = _mm256_shldv_epi32(a, b, c).as_i32x8(); @@ -757,6 +814,7 @@ pub unsafe fn _mm256_maskz_shldv_epi32(k: __mmask8, a: __m256i, b: __m256i, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shldv_epi32&expand=5072) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvd))] pub unsafe fn _mm_shldv_epi32(a: __m128i, b: __m128i, c: __m128i) -> __m128i { transmute(vpshldvd128(a.as_i32x4(), b.as_i32x4(), c.as_i32x4())) @@ -767,6 +825,7 @@ pub unsafe fn _mm_shldv_epi32(a: __m128i, b: __m128i, c: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shldv_epi32&expand=5070) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvd))] pub unsafe fn _mm_mask_shldv_epi32(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { let shf = _mm_shldv_epi32(a, b, c).as_i32x4(); @@ -778,6 +837,7 @@ pub unsafe fn _mm_mask_shldv_epi32(a: __m128i, k: __mmask8, b: __m128i, c: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shldv_epi32&expand=5071) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = 
"stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvd))] pub unsafe fn _mm_maskz_shldv_epi32(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { let shf = _mm_shldv_epi32(a, b, c).as_i32x4(); @@ -790,6 +850,7 @@ pub unsafe fn _mm_maskz_shldv_epi32(k: __mmask8, a: __m128i, b: __m128i, c: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shldv_epi16&expand=5069) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvw))] pub unsafe fn _mm512_shldv_epi16(a: __m512i, b: __m512i, c: __m512i) -> __m512i { transmute(vpshldvw(a.as_i16x32(), b.as_i16x32(), c.as_i16x32())) @@ -800,6 +861,7 @@ pub unsafe fn _mm512_shldv_epi16(a: __m512i, b: __m512i, c: __m512i) -> __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shldv_epi16&expand=5067) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvw))] pub unsafe fn _mm512_mask_shldv_epi16(a: __m512i, k: __mmask32, b: __m512i, c: __m512i) -> __m512i { let shf = _mm512_shldv_epi16(a, b, c).as_i16x32(); @@ -811,6 +873,7 @@ pub unsafe fn _mm512_mask_shldv_epi16(a: __m512i, k: __mmask32, b: __m512i, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shldv_epi16&expand=5068) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvw))] pub unsafe fn _mm512_maskz_shldv_epi16( k: __mmask32, @@ -828,6 +891,7 @@ pub unsafe fn _mm512_maskz_shldv_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shldv_epi16&expand=5066) #[inline] 
#[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvw))] pub unsafe fn _mm256_shldv_epi16(a: __m256i, b: __m256i, c: __m256i) -> __m256i { transmute(vpshldvw256(a.as_i16x16(), b.as_i16x16(), c.as_i16x16())) @@ -838,6 +902,7 @@ pub unsafe fn _mm256_shldv_epi16(a: __m256i, b: __m256i, c: __m256i) -> __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shldv_epi16&expand=5064) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvw))] pub unsafe fn _mm256_mask_shldv_epi16(a: __m256i, k: __mmask16, b: __m256i, c: __m256i) -> __m256i { let shf = _mm256_shldv_epi16(a, b, c).as_i16x16(); @@ -849,6 +914,7 @@ pub unsafe fn _mm256_mask_shldv_epi16(a: __m256i, k: __mmask16, b: __m256i, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shldv_epi16&expand=5065) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvw))] pub unsafe fn _mm256_maskz_shldv_epi16( k: __mmask16, @@ -866,6 +932,7 @@ pub unsafe fn _mm256_maskz_shldv_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shldv_epi16&expand=5063) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvw))] pub unsafe fn _mm_shldv_epi16(a: __m128i, b: __m128i, c: __m128i) -> __m128i { transmute(vpshldvw128(a.as_i16x8(), b.as_i16x8(), c.as_i16x8())) @@ -876,6 +943,7 @@ pub unsafe fn _mm_shldv_epi16(a: __m128i, b: __m128i, c: __m128i) -> __m128i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shldv_epi16&expand=5061) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvw))] pub unsafe fn _mm_mask_shldv_epi16(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { let shf = _mm_shldv_epi16(a, b, c).as_i16x8(); @@ -887,6 +955,7 @@ pub unsafe fn _mm_mask_shldv_epi16(a: __m128i, k: __mmask8, b: __m128i, c: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shldv_epi16&expand=5062) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldvw))] pub unsafe fn _mm_maskz_shldv_epi16(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { let shf = _mm_shldv_epi16(a, b, c).as_i16x8(); @@ -899,6 +968,7 @@ pub unsafe fn _mm_maskz_shldv_epi16(k: __mmask8, a: __m128i, b: __m128i, c: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shrdv_epi64&expand=5141) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvq))] pub unsafe fn _mm512_shrdv_epi64(a: __m512i, b: __m512i, c: __m512i) -> __m512i { transmute(vpshrdvq(a.as_i64x8(), b.as_i64x8(), c.as_i64x8())) @@ -909,6 +979,7 @@ pub unsafe fn _mm512_shrdv_epi64(a: __m512i, b: __m512i, c: __m512i) -> __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shrdv_epi64&expand=5139) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvq))] pub unsafe fn _mm512_mask_shrdv_epi64(a: __m512i, k: __mmask8, b: 
__m512i, c: __m512i) -> __m512i { let shf = _mm512_shrdv_epi64(a, b, c).as_i64x8(); @@ -920,6 +991,7 @@ pub unsafe fn _mm512_mask_shrdv_epi64(a: __m512i, k: __mmask8, b: __m512i, c: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shrdv_epi64&expand=5140) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvq))] pub unsafe fn _mm512_maskz_shrdv_epi64(k: __mmask8, a: __m512i, b: __m512i, c: __m512i) -> __m512i { let shf = _mm512_shrdv_epi64(a, b, c).as_i64x8(); @@ -932,6 +1004,7 @@ pub unsafe fn _mm512_maskz_shrdv_epi64(k: __mmask8, a: __m512i, b: __m512i, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shrdv_epi64&expand=5138) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvq))] pub unsafe fn _mm256_shrdv_epi64(a: __m256i, b: __m256i, c: __m256i) -> __m256i { transmute(vpshrdvq256(a.as_i64x4(), b.as_i64x4(), c.as_i64x4())) @@ -942,6 +1015,7 @@ pub unsafe fn _mm256_shrdv_epi64(a: __m256i, b: __m256i, c: __m256i) -> __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shrdv_epi64&expand=5136) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvq))] pub unsafe fn _mm256_mask_shrdv_epi64(a: __m256i, k: __mmask8, b: __m256i, c: __m256i) -> __m256i { let shf = _mm256_shrdv_epi64(a, b, c).as_i64x4(); @@ -953,6 +1027,7 @@ pub unsafe fn _mm256_mask_shrdv_epi64(a: __m256i, k: __mmask8, b: __m256i, c: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shrdv_epi64&expand=5137) 
#[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvq))] pub unsafe fn _mm256_maskz_shrdv_epi64(k: __mmask8, a: __m256i, b: __m256i, c: __m256i) -> __m256i { let shf = _mm256_shrdv_epi64(a, b, c).as_i64x4(); @@ -965,6 +1040,7 @@ pub unsafe fn _mm256_maskz_shrdv_epi64(k: __mmask8, a: __m256i, b: __m256i, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shrdv_epi64&expand=5135) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvq))] pub unsafe fn _mm_shrdv_epi64(a: __m128i, b: __m128i, c: __m128i) -> __m128i { transmute(vpshrdvq128(a.as_i64x2(), b.as_i64x2(), c.as_i64x2())) @@ -975,6 +1051,7 @@ pub unsafe fn _mm_shrdv_epi64(a: __m128i, b: __m128i, c: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shrdv_epi64&expand=5133) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvq))] pub unsafe fn _mm_mask_shrdv_epi64(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { let shf = _mm_shrdv_epi64(a, b, c).as_i64x2(); @@ -986,6 +1063,7 @@ pub unsafe fn _mm_mask_shrdv_epi64(a: __m128i, k: __mmask8, b: __m128i, c: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shrdv_epi64&expand=5134) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvq))] pub unsafe fn _mm_maskz_shrdv_epi64(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { let shf = _mm_shrdv_epi64(a, b, c).as_i64x2(); @@ -998,6 +1076,7 @@ pub unsafe 
fn _mm_maskz_shrdv_epi64(k: __mmask8, a: __m128i, b: __m128i, c: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shrdv_epi32&expand=5132) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvd))] pub unsafe fn _mm512_shrdv_epi32(a: __m512i, b: __m512i, c: __m512i) -> __m512i { transmute(vpshrdvd(a.as_i32x16(), b.as_i32x16(), c.as_i32x16())) @@ -1008,6 +1087,7 @@ pub unsafe fn _mm512_shrdv_epi32(a: __m512i, b: __m512i, c: __m512i) -> __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shrdv_epi32&expand=5130) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvd))] pub unsafe fn _mm512_mask_shrdv_epi32(a: __m512i, k: __mmask16, b: __m512i, c: __m512i) -> __m512i { let shf = _mm512_shrdv_epi32(a, b, c).as_i32x16(); @@ -1019,6 +1099,7 @@ pub unsafe fn _mm512_mask_shrdv_epi32(a: __m512i, k: __mmask16, b: __m512i, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shrdv_epi32&expand=5131) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvd))] pub unsafe fn _mm512_maskz_shrdv_epi32( k: __mmask16, @@ -1036,6 +1117,7 @@ pub unsafe fn _mm512_maskz_shrdv_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shrdv_epi32&expand=5129) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvd))] pub unsafe fn _mm256_shrdv_epi32(a: __m256i, b: __m256i, c: __m256i) -> __m256i { 
transmute(vpshrdvd256(a.as_i32x8(), b.as_i32x8(), c.as_i32x8())) @@ -1046,6 +1128,7 @@ pub unsafe fn _mm256_shrdv_epi32(a: __m256i, b: __m256i, c: __m256i) -> __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shrdv_epi32&expand=5127) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvd))] pub unsafe fn _mm256_mask_shrdv_epi32(a: __m256i, k: __mmask8, b: __m256i, c: __m256i) -> __m256i { let shf = _mm256_shrdv_epi32(a, b, c).as_i32x8(); @@ -1057,6 +1140,7 @@ pub unsafe fn _mm256_mask_shrdv_epi32(a: __m256i, k: __mmask8, b: __m256i, c: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shrdv_epi32&expand=5128) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvd))] pub unsafe fn _mm256_maskz_shrdv_epi32(k: __mmask8, a: __m256i, b: __m256i, c: __m256i) -> __m256i { let shf = _mm256_shrdv_epi32(a, b, c).as_i32x8(); @@ -1069,6 +1153,7 @@ pub unsafe fn _mm256_maskz_shrdv_epi32(k: __mmask8, a: __m256i, b: __m256i, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shrdv_epi32&expand=5126) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvd))] pub unsafe fn _mm_shrdv_epi32(a: __m128i, b: __m128i, c: __m128i) -> __m128i { transmute(vpshrdvd128(a.as_i32x4(), b.as_i32x4(), c.as_i32x4())) @@ -1079,6 +1164,7 @@ pub unsafe fn _mm_shrdv_epi32(a: __m128i, b: __m128i, c: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shrdv_epi32&expand=5124) #[inline] 
#[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvd))] pub unsafe fn _mm_mask_shrdv_epi32(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { let shf = _mm_shrdv_epi32(a, b, c).as_i32x4(); @@ -1090,6 +1176,7 @@ pub unsafe fn _mm_mask_shrdv_epi32(a: __m128i, k: __mmask8, b: __m128i, c: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shrdv_epi32&expand=5125) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvd))] pub unsafe fn _mm_maskz_shrdv_epi32(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { let shf = _mm_shrdv_epi32(a, b, c).as_i32x4(); @@ -1102,6 +1189,7 @@ pub unsafe fn _mm_maskz_shrdv_epi32(k: __mmask8, a: __m128i, b: __m128i, c: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shrdv_epi16&expand=5123) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvw))] pub unsafe fn _mm512_shrdv_epi16(a: __m512i, b: __m512i, c: __m512i) -> __m512i { transmute(vpshrdvw(a.as_i16x32(), b.as_i16x32(), c.as_i16x32())) @@ -1112,6 +1200,7 @@ pub unsafe fn _mm512_shrdv_epi16(a: __m512i, b: __m512i, c: __m512i) -> __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shrdv_epi16&expand=5121) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvw))] pub unsafe fn _mm512_mask_shrdv_epi16(a: __m512i, k: __mmask32, b: __m512i, c: __m512i) -> __m512i { let shf = _mm512_shrdv_epi16(a, b, c).as_i16x32(); @@ -1123,6 +1212,7 @@ pub unsafe fn 
_mm512_mask_shrdv_epi16(a: __m512i, k: __mmask32, b: __m512i, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shrdv_epi16&expand=5122) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvw))] pub unsafe fn _mm512_maskz_shrdv_epi16( k: __mmask32, @@ -1140,6 +1230,7 @@ pub unsafe fn _mm512_maskz_shrdv_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shrdv_epi16&expand=5120) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvw))] pub unsafe fn _mm256_shrdv_epi16(a: __m256i, b: __m256i, c: __m256i) -> __m256i { transmute(vpshrdvw256(a.as_i16x16(), b.as_i16x16(), c.as_i16x16())) @@ -1150,6 +1241,7 @@ pub unsafe fn _mm256_shrdv_epi16(a: __m256i, b: __m256i, c: __m256i) -> __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shrdv_epi16&expand=5118) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvw))] pub unsafe fn _mm256_mask_shrdv_epi16(a: __m256i, k: __mmask16, b: __m256i, c: __m256i) -> __m256i { let shf = _mm256_shrdv_epi16(a, b, c).as_i16x16(); @@ -1161,6 +1253,7 @@ pub unsafe fn _mm256_mask_shrdv_epi16(a: __m256i, k: __mmask16, b: __m256i, c: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shrdv_epi16&expand=5119) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvw))] pub unsafe fn _mm256_maskz_shrdv_epi16( k: __mmask16, @@ -1178,6 +1271,7 @@ pub 
unsafe fn _mm256_maskz_shrdv_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shrdv_epi16&expand=5117) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvw))] pub unsafe fn _mm_shrdv_epi16(a: __m128i, b: __m128i, c: __m128i) -> __m128i { transmute(vpshrdvw128(a.as_i16x8(), b.as_i16x8(), c.as_i16x8())) @@ -1188,6 +1282,7 @@ pub unsafe fn _mm_shrdv_epi16(a: __m128i, b: __m128i, c: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shrdv_epi16&expand=5115) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvw))] pub unsafe fn _mm_mask_shrdv_epi16(a: __m128i, k: __mmask8, b: __m128i, c: __m128i) -> __m128i { let shf = _mm_shrdv_epi16(a, b, c).as_i16x8(); @@ -1199,6 +1294,7 @@ pub unsafe fn _mm_mask_shrdv_epi16(a: __m128i, k: __mmask8, b: __m128i, c: __m12 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shrdv_epi16&expand=5116) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshrdvw))] pub unsafe fn _mm_maskz_shrdv_epi16(k: __mmask8, a: __m128i, b: __m128i, c: __m128i) -> __m128i { let shf = _mm_shrdv_epi16(a, b, c).as_i16x8(); @@ -1211,6 +1307,7 @@ pub unsafe fn _mm_maskz_shrdv_epi16(k: __mmask8, a: __m128i, b: __m128i, c: __m1 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shldi_epi64&expand=5060) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] 
#[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shldi_epi64(a: __m512i, b: __m512i) -> __m512i { @@ -1228,6 +1325,7 @@ pub unsafe fn _mm512_shldi_epi64(a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shldi_epi64&expand=5058) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_shldi_epi64( @@ -1251,6 +1349,7 @@ pub unsafe fn _mm512_mask_shldi_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shldi_epi64&expand=5059) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_shldi_epi64( @@ -1274,6 +1373,7 @@ pub unsafe fn _mm512_maskz_shldi_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shldi_epi64&expand=5057) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shldi_epi64(a: __m256i, b: __m256i) -> __m256i { @@ -1291,6 +1391,7 @@ pub unsafe fn _mm256_shldi_epi64(a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shldi_epi64&expand=5055) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_shldi_epi64( @@ -1314,6 +1415,7 @@ pub 
unsafe fn _mm256_mask_shldi_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shldi_epi64&expand=5056) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_shldi_epi64( @@ -1337,6 +1439,7 @@ pub unsafe fn _mm256_maskz_shldi_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shldi_epi64&expand=5054) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_shldi_epi64(a: __m128i, b: __m128i) -> __m128i { @@ -1354,6 +1457,7 @@ pub unsafe fn _mm_shldi_epi64(a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shldi_epi64&expand=5052) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_shldi_epi64( @@ -1373,6 +1477,7 @@ pub unsafe fn _mm_mask_shldi_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shldi_epi64&expand=5053) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_shldi_epi64( @@ -1392,6 +1497,7 @@ pub unsafe fn _mm_maskz_shldi_epi64( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shldi_epi32&expand=5051) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shldi_epi32(a: __m512i, b: __m512i) -> __m512i { @@ -1408,6 +1514,7 @@ pub unsafe fn _mm512_shldi_epi32(a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shldi_epi32&expand=5049) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_shldi_epi32( @@ -1430,6 +1537,7 @@ pub unsafe fn _mm512_mask_shldi_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shldi_epi32&expand=5050) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_shldi_epi32( @@ -1452,6 +1560,7 @@ pub unsafe fn _mm512_maskz_shldi_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shldi_epi32&expand=5048) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shldi_epi32(a: __m256i, b: __m256i) -> __m256i { @@ -1468,6 +1577,7 @@ pub unsafe fn _mm256_shldi_epi32(a: __m256i, b: __m256i) -> __m /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shldi_epi32&expand=5046) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_shldi_epi32( @@ -1490,6 +1600,7 @@ pub unsafe fn _mm256_mask_shldi_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shldi_epi32&expand=5047) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_shldi_epi32( @@ -1512,6 +1623,7 @@ pub unsafe fn _mm256_maskz_shldi_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shldi_epi32&expand=5045) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_shldi_epi32(a: __m128i, b: __m128i) -> __m128i { @@ -1528,6 +1640,7 @@ pub unsafe fn _mm_shldi_epi32(a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shldi_epi32&expand=5043) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_shldi_epi32( @@ -1546,6 +1659,7 @@ pub unsafe fn _mm_mask_shldi_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shldi_epi32&expand=5044) #[inline] 
#[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_shldi_epi32( @@ -1564,6 +1678,7 @@ pub unsafe fn _mm_maskz_shldi_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shldi_epi16&expand=5042) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shldi_epi16(a: __m512i, b: __m512i) -> __m512i { @@ -1581,6 +1696,7 @@ pub unsafe fn _mm512_shldi_epi16(a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shldi_epi16&expand=5040) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_shldi_epi16( @@ -1604,6 +1720,7 @@ pub unsafe fn _mm512_mask_shldi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shldi_epi16&expand=5041) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_shldi_epi16( @@ -1627,6 +1744,7 @@ pub unsafe fn _mm512_maskz_shldi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shldi_epi16&expand=5039) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] 
#[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shldi_epi16(a: __m256i, b: __m256i) -> __m256i { @@ -1644,6 +1762,7 @@ pub unsafe fn _mm256_shldi_epi16(a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shldi_epi16&expand=5037) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_shldi_epi16( @@ -1667,6 +1786,7 @@ pub unsafe fn _mm256_mask_shldi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shldi_epi16&expand=5038) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_shldi_epi16( @@ -1690,6 +1810,7 @@ pub unsafe fn _mm256_maskz_shldi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shldi_epi16&expand=5036) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_shldi_epi16(a: __m128i, b: __m128i) -> __m128i { @@ -1707,6 +1828,7 @@ pub unsafe fn _mm_shldi_epi16(a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shldi_epi16&expand=5034) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_shldi_epi16( @@ -1726,6 +1848,7 
@@ pub unsafe fn _mm_mask_shldi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shldi_epi16&expand=5035) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_shldi_epi16( @@ -1745,6 +1868,7 @@ pub unsafe fn _mm_maskz_shldi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shrdi_epi64&expand=5114) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shrdi_epi64(a: __m512i, b: __m512i) -> __m512i { @@ -1762,6 +1886,7 @@ pub unsafe fn _mm512_shrdi_epi64(a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shrdi_epi64&expand=5112) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_shrdi_epi64( @@ -1785,6 +1910,7 @@ pub unsafe fn _mm512_mask_shrdi_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shrdi_epi64&expand=5113) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 255))] //should be vpshrdq #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_shrdi_epi64( @@ -1808,6 +1934,7 @@ pub unsafe fn _mm512_maskz_shrdi_epi64( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shrdi_epi64&expand=5111) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shrdi_epi64(a: __m256i, b: __m256i) -> __m256i { @@ -1825,6 +1952,7 @@ pub unsafe fn _mm256_shrdi_epi64(a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shrdi_epi64&expand=5109) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_shrdi_epi64( @@ -1848,6 +1976,7 @@ pub unsafe fn _mm256_mask_shrdi_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shrdi_epi64&expand=5110) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_shrdi_epi64( @@ -1871,6 +2000,7 @@ pub unsafe fn _mm256_maskz_shrdi_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shrdi_epi64&expand=5108) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_shrdi_epi64(a: __m128i, b: __m128i) -> __m128i { @@ -1888,6 +2018,7 @@ pub unsafe fn _mm_shrdi_epi64(a: __m128i, b: __m128i) -> __m128 /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shrdi_epi64&expand=5106) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_shrdi_epi64( @@ -1907,6 +2038,7 @@ pub unsafe fn _mm_mask_shrdi_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shrdi_epi64&expand=5107) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_shrdi_epi64( @@ -1926,6 +2058,7 @@ pub unsafe fn _mm_maskz_shrdi_epi64( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shrdi_epi32&expand=5105) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shrdi_epi32(a: __m512i, b: __m512i) -> __m512i { @@ -1942,6 +2075,7 @@ pub unsafe fn _mm512_shrdi_epi32(a: __m512i, b: __m512i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shrdi_epi32&expand=5103) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_shrdi_epi32( @@ -1964,6 +2098,7 @@ pub unsafe fn _mm512_mask_shrdi_epi32( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shrdi_epi32&expand=5104) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_shrdi_epi32( @@ -1986,6 +2121,7 @@ pub unsafe fn _mm512_maskz_shrdi_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shrdi_epi32&expand=5102) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shrdi_epi32(a: __m256i, b: __m256i) -> __m256i { @@ -2002,6 +2138,7 @@ pub unsafe fn _mm256_shrdi_epi32(a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shrdi_epi32&expand=5100) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_shrdi_epi32( @@ -2024,6 +2161,7 @@ pub unsafe fn _mm256_mask_shrdi_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shrdi_epi32&expand=5101) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_shrdi_epi32( @@ -2046,6 +2184,7 @@ pub unsafe fn _mm256_maskz_shrdi_epi32( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shrdi_epi32&expand=5099) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_shrdi_epi32(a: __m128i, b: __m128i) -> __m128i { @@ -2062,6 +2201,7 @@ pub unsafe fn _mm_shrdi_epi32(a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shrdi_epi32&expand=5097) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_shrdi_epi32( @@ -2080,6 +2220,7 @@ pub unsafe fn _mm_mask_shrdi_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shrdi_epi32&expand=5098) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshldd #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_shrdi_epi32( @@ -2098,6 +2239,7 @@ pub unsafe fn _mm_maskz_shrdi_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shrdi_epi16&expand=5096) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_shrdi_epi16(a: __m512i, b: __m512i) -> __m512i { @@ -2116,6 +2258,7 @@ pub unsafe fn _mm512_shrdi_epi16(a: __m512i, b: __m512i) -> __m /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shrdi_epi16&expand=5094) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_shrdi_epi16( @@ -2140,6 +2283,7 @@ pub unsafe fn _mm512_mask_shrdi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shrdi_epi16&expand=5095) #[inline] #[target_feature(enable = "avx512vbmi2")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_shrdi_epi16( @@ -2164,6 +2308,7 @@ pub unsafe fn _mm512_maskz_shrdi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shrdi_epi16&expand=5093) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_shrdi_epi16(a: __m256i, b: __m256i) -> __m256i { @@ -2182,6 +2327,7 @@ pub unsafe fn _mm256_shrdi_epi16(a: __m256i, b: __m256i) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shrdi_epi16&expand=5091) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_shrdi_epi16( @@ -2206,6 +2352,7 @@ pub unsafe fn _mm256_mask_shrdi_epi16( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shrdi_epi16&expand=5092) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_shrdi_epi16( @@ -2229,6 +2376,7 @@ pub unsafe fn _mm256_maskz_shrdi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shrdi_epi16&expand=5090) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_shrdi_epi16(a: __m128i, b: __m128i) -> __m128i { @@ -2246,6 +2394,7 @@ pub unsafe fn _mm_shrdi_epi16(a: __m128i, b: __m128i) -> __m128 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shrdi_epi16&expand=5088) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_shrdi_epi16( @@ -2265,6 +2414,7 @@ pub unsafe fn _mm_mask_shrdi_epi16( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shrdi_epi16&expand=5089) #[inline] #[target_feature(enable = "avx512vbmi2,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_shrdi_epi16( diff --git a/crates/core_arch/src/x86/avx512vnni.rs b/crates/core_arch/src/x86/avx512vnni.rs index 562c1ccb81..ad41b7b2d1 100644 --- 
a/crates/core_arch/src/x86/avx512vnni.rs +++ b/crates/core_arch/src/x86/avx512vnni.rs @@ -11,6 +11,7 @@ use stdarch_test::assert_instr; /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_dpwssd_epi32&expand=2219) #[inline] #[target_feature(enable = "avx512vnni")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssd))] pub unsafe fn _mm512_dpwssd_epi32(src: __m512i, a: __m512i, b: __m512i) -> __m512i { transmute(vpdpwssd(src.as_i32x16(), a.as_i32x16(), b.as_i32x16())) @@ -21,6 +22,7 @@ pub unsafe fn _mm512_dpwssd_epi32(src: __m512i, a: __m512i, b: __m512i) -> __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_dpwssd_epi32&expand=2220) #[inline] #[target_feature(enable = "avx512vnni")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssd))] pub unsafe fn _mm512_mask_dpwssd_epi32( src: __m512i, @@ -37,6 +39,7 @@ pub unsafe fn _mm512_mask_dpwssd_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_dpwssd_epi32&expand=2221) #[inline] #[target_feature(enable = "avx512vnni")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssd))] pub unsafe fn _mm512_maskz_dpwssd_epi32( k: __mmask16, @@ -54,6 +57,7 @@ pub unsafe fn _mm512_maskz_dpwssd_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_dpwssd_epi32&expand=2216) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssd))] pub unsafe fn _mm256_dpwssd_epi32(src: __m256i, a: __m256i, b: __m256i) -> __m256i { transmute(vpdpwssd256(src.as_i32x8(), a.as_i32x8(), b.as_i32x8())) @@ -64,6 +68,7 @@ pub unsafe fn 
_mm256_dpwssd_epi32(src: __m256i, a: __m256i, b: __m256i) -> __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_dpwssd_epi32&expand=2217) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssd))] pub unsafe fn _mm256_mask_dpwssd_epi32( src: __m256i, @@ -80,6 +85,7 @@ pub unsafe fn _mm256_mask_dpwssd_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_dpwssd_epi32&expand=2218) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssd))] pub unsafe fn _mm256_maskz_dpwssd_epi32( k: __mmask8, @@ -97,6 +103,7 @@ pub unsafe fn _mm256_maskz_dpwssd_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dpwssd_epi32&expand=2213) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssd))] pub unsafe fn _mm_dpwssd_epi32(src: __m128i, a: __m128i, b: __m128i) -> __m128i { transmute(vpdpwssd128(src.as_i32x4(), a.as_i32x4(), b.as_i32x4())) @@ -107,6 +114,7 @@ pub unsafe fn _mm_dpwssd_epi32(src: __m128i, a: __m128i, b: __m128i) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_dpwssd_epi32&expand=2214) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssd))] pub unsafe fn _mm_mask_dpwssd_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let r = _mm_dpwssd_epi32(src, a, b).as_i32x4(); @@ -118,6 +126,7 @@ pub unsafe fn _mm_mask_dpwssd_epi32(src: __m128i, k: __mmask8, 
a: __m128i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_dpwssd_epi32&expand=2215) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssd))] pub unsafe fn _mm_maskz_dpwssd_epi32(k: __mmask8, src: __m128i, a: __m128i, b: __m128i) -> __m128i { let r = _mm_dpwssd_epi32(src, a, b).as_i32x4(); @@ -130,6 +139,7 @@ pub unsafe fn _mm_maskz_dpwssd_epi32(k: __mmask8, src: __m128i, a: __m128i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_dpwssds_epi32&expand=2228) #[inline] #[target_feature(enable = "avx512vnni")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssds))] pub unsafe fn _mm512_dpwssds_epi32(src: __m512i, a: __m512i, b: __m512i) -> __m512i { transmute(vpdpwssds(src.as_i32x16(), a.as_i32x16(), b.as_i32x16())) @@ -140,6 +150,7 @@ pub unsafe fn _mm512_dpwssds_epi32(src: __m512i, a: __m512i, b: __m512i) -> __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_dpwssds_epi32&expand=2229) #[inline] #[target_feature(enable = "avx512vnni")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssds))] pub unsafe fn _mm512_mask_dpwssds_epi32( src: __m512i, @@ -156,6 +167,7 @@ pub unsafe fn _mm512_mask_dpwssds_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_dpwssds_epi32&expand=2230) #[inline] #[target_feature(enable = "avx512vnni")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssds))] pub unsafe fn _mm512_maskz_dpwssds_epi32( k: __mmask16, @@ -173,6 +185,7 @@ pub unsafe fn _mm512_maskz_dpwssds_epi32( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_dpwssds_epi32&expand=2225) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssds))] pub unsafe fn _mm256_dpwssds_epi32(src: __m256i, a: __m256i, b: __m256i) -> __m256i { transmute(vpdpwssds256(src.as_i32x8(), a.as_i32x8(), b.as_i32x8())) @@ -183,6 +196,7 @@ pub unsafe fn _mm256_dpwssds_epi32(src: __m256i, a: __m256i, b: __m256i) -> __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_dpwssds_epi32&expand=2226) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssds))] pub unsafe fn _mm256_mask_dpwssds_epi32( src: __m256i, @@ -199,6 +213,7 @@ pub unsafe fn _mm256_mask_dpwssds_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_dpwssds_epi32&expand=2227) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssds))] pub unsafe fn _mm256_maskz_dpwssds_epi32( k: __mmask8, @@ -216,6 +231,7 @@ pub unsafe fn _mm256_maskz_dpwssds_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dpwssds_epi32&expand=2222) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssds))] pub unsafe fn _mm_dpwssds_epi32(src: __m128i, a: __m128i, b: __m128i) -> __m128i { transmute(vpdpwssds128(src.as_i32x4(), a.as_i32x4(), b.as_i32x4())) @@ -226,6 +242,7 @@ pub unsafe fn _mm_dpwssds_epi32(src: __m128i, a: __m128i, b: __m128i) -> __m128i /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_dpwssds_epi32&expand=2223) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssds))] pub unsafe fn _mm_mask_dpwssds_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let r = _mm_dpwssds_epi32(src, a, b).as_i32x4(); @@ -237,6 +254,7 @@ pub unsafe fn _mm_mask_dpwssds_epi32(src: __m128i, k: __mmask8, a: __m128i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_dpwssds_epi32&expand=2224) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpwssds))] pub unsafe fn _mm_maskz_dpwssds_epi32( k: __mmask8, @@ -254,6 +272,7 @@ pub unsafe fn _mm_maskz_dpwssds_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_dpbusd_epi32&expand=2201) #[inline] #[target_feature(enable = "avx512vnni")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusd))] pub unsafe fn _mm512_dpbusd_epi32(src: __m512i, a: __m512i, b: __m512i) -> __m512i { transmute(vpdpbusd(src.as_i32x16(), a.as_i32x16(), b.as_i32x16())) @@ -264,6 +283,7 @@ pub unsafe fn _mm512_dpbusd_epi32(src: __m512i, a: __m512i, b: __m512i) -> __m51 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_dpbusd_epi32&expand=2202) #[inline] #[target_feature(enable = "avx512vnni")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusd))] pub unsafe fn _mm512_mask_dpbusd_epi32( src: __m512i, @@ -280,6 +300,7 @@ pub unsafe fn _mm512_mask_dpbusd_epi32( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_dpbusd_epi32&expand=2203) #[inline] #[target_feature(enable = "avx512vnni")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusd))] pub unsafe fn _mm512_maskz_dpbusd_epi32( k: __mmask16, @@ -297,6 +318,7 @@ pub unsafe fn _mm512_maskz_dpbusd_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_dpbusd_epi32&expand=2198) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusd))] pub unsafe fn _mm256_dpbusd_epi32(src: __m256i, a: __m256i, b: __m256i) -> __m256i { transmute(vpdpbusd256(src.as_i32x8(), a.as_i32x8(), b.as_i32x8())) @@ -307,6 +329,7 @@ pub unsafe fn _mm256_dpbusd_epi32(src: __m256i, a: __m256i, b: __m256i) -> __m25 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_dpbusd_epi32&expand=2199) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusd))] pub unsafe fn _mm256_mask_dpbusd_epi32( src: __m256i, @@ -323,6 +346,7 @@ pub unsafe fn _mm256_mask_dpbusd_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_dpbusd_epi32&expand=2200) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusd))] pub unsafe fn _mm256_maskz_dpbusd_epi32( k: __mmask8, @@ -340,6 +364,7 @@ pub unsafe fn _mm256_maskz_dpbusd_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dpbusd_epi32&expand=2195) #[inline] #[target_feature(enable = 
"avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusd))] pub unsafe fn _mm_dpbusd_epi32(src: __m128i, a: __m128i, b: __m128i) -> __m128i { transmute(vpdpbusd128(src.as_i32x4(), a.as_i32x4(), b.as_i32x4())) @@ -350,6 +375,7 @@ pub unsafe fn _mm_dpbusd_epi32(src: __m128i, a: __m128i, b: __m128i) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_dpbusd_epi32&expand=2196) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusd))] pub unsafe fn _mm_mask_dpbusd_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let r = _mm_dpbusd_epi32(src, a, b).as_i32x4(); @@ -361,6 +387,7 @@ pub unsafe fn _mm_mask_dpbusd_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_dpbusd_epi32&expand=2197) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusd))] pub unsafe fn _mm_maskz_dpbusd_epi32(k: __mmask8, src: __m128i, a: __m128i, b: __m128i) -> __m128i { let r = _mm_dpbusd_epi32(src, a, b).as_i32x4(); @@ -373,6 +400,7 @@ pub unsafe fn _mm_maskz_dpbusd_epi32(k: __mmask8, src: __m128i, a: __m128i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_dpbusds_epi32&expand=2210) #[inline] #[target_feature(enable = "avx512vnni")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusds))] pub unsafe fn _mm512_dpbusds_epi32(src: __m512i, a: __m512i, b: __m512i) -> __m512i { transmute(vpdpbusds(src.as_i32x16(), a.as_i32x16(), b.as_i32x16())) @@ -383,6 +411,7 @@ pub unsafe fn 
_mm512_dpbusds_epi32(src: __m512i, a: __m512i, b: __m512i) -> __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_dpbusds_epi32&expand=2211) #[inline] #[target_feature(enable = "avx512vnni")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusds))] pub unsafe fn _mm512_mask_dpbusds_epi32( src: __m512i, @@ -399,6 +428,7 @@ pub unsafe fn _mm512_mask_dpbusds_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_dpbusds_epi32&expand=2212) #[inline] #[target_feature(enable = "avx512vnni")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusds))] pub unsafe fn _mm512_maskz_dpbusds_epi32( k: __mmask16, @@ -416,6 +446,7 @@ pub unsafe fn _mm512_maskz_dpbusds_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_dpbusds_epi32&expand=2207) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusds))] pub unsafe fn _mm256_dpbusds_epi32(src: __m256i, a: __m256i, b: __m256i) -> __m256i { transmute(vpdpbusds256(src.as_i32x8(), a.as_i32x8(), b.as_i32x8())) @@ -426,6 +457,7 @@ pub unsafe fn _mm256_dpbusds_epi32(src: __m256i, a: __m256i, b: __m256i) -> __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_dpbusds_epi32&expand=2208) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusds))] pub unsafe fn _mm256_mask_dpbusds_epi32( src: __m256i, @@ -442,6 +474,7 @@ pub unsafe fn _mm256_mask_dpbusds_epi32( /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_dpbusds_epi32&expand=2209) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusds))] pub unsafe fn _mm256_maskz_dpbusds_epi32( k: __mmask8, @@ -459,6 +492,7 @@ pub unsafe fn _mm256_maskz_dpbusds_epi32( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dpbusds_epi32&expand=2204) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusds))] pub unsafe fn _mm_dpbusds_epi32(src: __m128i, a: __m128i, b: __m128i) -> __m128i { transmute(vpdpbusds128(src.as_i32x4(), a.as_i32x4(), b.as_i32x4())) @@ -469,6 +503,7 @@ pub unsafe fn _mm_dpbusds_epi32(src: __m128i, a: __m128i, b: __m128i) -> __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_dpbusds_epi32&expand=2205) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusds))] pub unsafe fn _mm_mask_dpbusds_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128i) -> __m128i { let r = _mm_dpbusds_epi32(src, a, b).as_i32x4(); @@ -480,6 +515,7 @@ pub unsafe fn _mm_mask_dpbusds_epi32(src: __m128i, k: __mmask8, a: __m128i, b: _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_dpbusds_epi32&expand=2206) #[inline] #[target_feature(enable = "avx512vnni,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpdpbusds))] pub unsafe fn _mm_maskz_dpbusds_epi32( k: __mmask8, diff --git a/crates/core_arch/src/x86/avx512vpopcntdq.rs b/crates/core_arch/src/x86/avx512vpopcntdq.rs index 
d196958f03..cedf72f7d8 100644 --- a/crates/core_arch/src/x86/avx512vpopcntdq.rs +++ b/crates/core_arch/src/x86/avx512vpopcntdq.rs @@ -52,6 +52,7 @@ extern "C" { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_popcnt_epi32) #[inline] #[target_feature(enable = "avx512vpopcntdq")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntd))] pub unsafe fn _mm512_popcnt_epi32(a: __m512i) -> __m512i { transmute(popcnt_v16i32(a.as_i32x16())) @@ -65,6 +66,7 @@ pub unsafe fn _mm512_popcnt_epi32(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_popcnt_epi32) #[inline] #[target_feature(enable = "avx512vpopcntdq")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntd))] pub unsafe fn _mm512_maskz_popcnt_epi32(k: __mmask16, a: __m512i) -> __m512i { let zero = _mm512_setzero_si512().as_i32x16(); @@ -79,6 +81,7 @@ pub unsafe fn _mm512_maskz_popcnt_epi32(k: __mmask16, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_popcnt_epi32) #[inline] #[target_feature(enable = "avx512vpopcntdq")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntd))] pub unsafe fn _mm512_mask_popcnt_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i { transmute(simd_select_bitmask( @@ -93,6 +96,7 @@ pub unsafe fn _mm512_mask_popcnt_epi32(src: __m512i, k: __mmask16, a: __m512i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_popcnt_epi32) #[inline] #[target_feature(enable = "avx512vpopcntdq,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntd))] pub unsafe fn _mm256_popcnt_epi32(a: __m256i) 
-> __m256i { transmute(popcnt_v8i32(a.as_i32x8())) @@ -106,6 +110,7 @@ pub unsafe fn _mm256_popcnt_epi32(a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_popcnt_epi32) #[inline] #[target_feature(enable = "avx512vpopcntdq,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntd))] pub unsafe fn _mm256_maskz_popcnt_epi32(k: __mmask8, a: __m256i) -> __m256i { let zero = _mm256_setzero_si256().as_i32x8(); @@ -120,6 +125,7 @@ pub unsafe fn _mm256_maskz_popcnt_epi32(k: __mmask8, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_popcnt_epi32) #[inline] #[target_feature(enable = "avx512vpopcntdq,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntd))] pub unsafe fn _mm256_mask_popcnt_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { transmute(simd_select_bitmask( @@ -134,6 +140,7 @@ pub unsafe fn _mm256_mask_popcnt_epi32(src: __m256i, k: __mmask8, a: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_epi32) #[inline] #[target_feature(enable = "avx512vpopcntdq,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntd))] pub unsafe fn _mm_popcnt_epi32(a: __m128i) -> __m128i { transmute(popcnt_v4i32(a.as_i32x4())) @@ -147,6 +154,7 @@ pub unsafe fn _mm_popcnt_epi32(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_popcnt_epi32) #[inline] #[target_feature(enable = "avx512vpopcntdq,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntd))] pub unsafe fn _mm_maskz_popcnt_epi32(k: __mmask8, a: 
__m128i) -> __m128i { let zero = _mm_setzero_si128().as_i32x4(); @@ -161,6 +169,7 @@ pub unsafe fn _mm_maskz_popcnt_epi32(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_popcnt_epi32) #[inline] #[target_feature(enable = "avx512vpopcntdq,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntd))] pub unsafe fn _mm_mask_popcnt_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(simd_select_bitmask( @@ -175,6 +184,7 @@ pub unsafe fn _mm_mask_popcnt_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_popcnt_epi64) #[inline] #[target_feature(enable = "avx512vpopcntdq")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntq))] pub unsafe fn _mm512_popcnt_epi64(a: __m512i) -> __m512i { transmute(popcnt_v8i64(a.as_i64x8())) @@ -188,6 +198,7 @@ pub unsafe fn _mm512_popcnt_epi64(a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_popcnt_epi64) #[inline] #[target_feature(enable = "avx512vpopcntdq")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntq))] pub unsafe fn _mm512_maskz_popcnt_epi64(k: __mmask8, a: __m512i) -> __m512i { let zero = _mm512_setzero_si512().as_i64x8(); @@ -202,6 +213,7 @@ pub unsafe fn _mm512_maskz_popcnt_epi64(k: __mmask8, a: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_popcnt_epi64) #[inline] #[target_feature(enable = "avx512vpopcntdq")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntq))] pub unsafe fn _mm512_mask_popcnt_epi64(src: 
__m512i, k: __mmask8, a: __m512i) -> __m512i { transmute(simd_select_bitmask( @@ -216,6 +228,7 @@ pub unsafe fn _mm512_mask_popcnt_epi64(src: __m512i, k: __mmask8, a: __m512i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_popcnt_epi64) #[inline] #[target_feature(enable = "avx512vpopcntdq,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntq))] pub unsafe fn _mm256_popcnt_epi64(a: __m256i) -> __m256i { transmute(popcnt_v4i64(a.as_i64x4())) @@ -229,6 +242,7 @@ pub unsafe fn _mm256_popcnt_epi64(a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_popcnt_epi64) #[inline] #[target_feature(enable = "avx512vpopcntdq,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntq))] pub unsafe fn _mm256_maskz_popcnt_epi64(k: __mmask8, a: __m256i) -> __m256i { let zero = _mm256_setzero_si256().as_i64x4(); @@ -243,6 +257,7 @@ pub unsafe fn _mm256_maskz_popcnt_epi64(k: __mmask8, a: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_popcnt_epi64) #[inline] #[target_feature(enable = "avx512vpopcntdq,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntq))] pub unsafe fn _mm256_mask_popcnt_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i { transmute(simd_select_bitmask( @@ -257,6 +272,7 @@ pub unsafe fn _mm256_mask_popcnt_epi64(src: __m256i, k: __mmask8, a: __m256i) -> /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_epi64) #[inline] #[target_feature(enable = "avx512vpopcntdq,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntq))] pub 
unsafe fn _mm_popcnt_epi64(a: __m128i) -> __m128i { transmute(popcnt_v2i64(a.as_i64x2())) @@ -270,6 +286,7 @@ pub unsafe fn _mm_popcnt_epi64(a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_popcnt_epi64) #[inline] #[target_feature(enable = "avx512vpopcntdq,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntq))] pub unsafe fn _mm_maskz_popcnt_epi64(k: __mmask8, a: __m128i) -> __m128i { let zero = _mm_setzero_si128().as_i64x2(); @@ -284,6 +301,7 @@ pub unsafe fn _mm_maskz_popcnt_epi64(k: __mmask8, a: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_popcnt_epi64) #[inline] #[target_feature(enable = "avx512vpopcntdq,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpopcntq))] pub unsafe fn _mm_mask_popcnt_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i { transmute(simd_select_bitmask( diff --git a/crates/core_arch/src/x86/cpuid.rs b/crates/core_arch/src/x86/cpuid.rs index 3bfb353007..90394a921e 100644 --- a/crates/core_arch/src/x86/cpuid.rs +++ b/crates/core_arch/src/x86/cpuid.rs @@ -98,6 +98,7 @@ pub unsafe fn __cpuid(leaf: u32) -> CpuidResult { /// Does the host support the `cpuid` instruction? 
#[inline] +#[unstable(feature = "stdarch_x86_has_cpuid", issue = "60123")] pub fn has_cpuid() -> bool { #[cfg(target_env = "sgx")] { diff --git a/crates/core_arch/src/x86/gfni.rs b/crates/core_arch/src/x86/gfni.rs index 7c2195e713..2a2cb72ce0 100644 --- a/crates/core_arch/src/x86/gfni.rs +++ b/crates/core_arch/src/x86/gfni.rs @@ -66,6 +66,7 @@ extern "C" { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_gf2p8mul_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8mulb))] pub unsafe fn _mm512_gf2p8mul_epi8(a: __m512i, b: __m512i) -> __m512i { transmute(vgf2p8mulb_512(a.as_i8x64(), b.as_i8x64())) @@ -81,6 +82,7 @@ pub unsafe fn _mm512_gf2p8mul_epi8(a: __m512i, b: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_gf2p8mul_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8mulb))] pub unsafe fn _mm512_mask_gf2p8mul_epi8( src: __m512i, @@ -105,6 +107,7 @@ pub unsafe fn _mm512_mask_gf2p8mul_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_gf2p8mul_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8mulb))] pub unsafe fn _mm512_maskz_gf2p8mul_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let zero = _mm512_setzero_si512().as_i8x64(); @@ -122,6 +125,7 @@ pub unsafe fn _mm512_maskz_gf2p8mul_epi8(k: __mmask64, a: __m512i, b: __m512i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_gf2p8mul_epi8) #[inline] #[target_feature(enable = 
"gfni,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8mulb))] pub unsafe fn _mm256_gf2p8mul_epi8(a: __m256i, b: __m256i) -> __m256i { transmute(vgf2p8mulb_256(a.as_i8x32(), b.as_i8x32())) @@ -137,6 +141,7 @@ pub unsafe fn _mm256_gf2p8mul_epi8(a: __m256i, b: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_gf2p8mul_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8mulb))] pub unsafe fn _mm256_mask_gf2p8mul_epi8( src: __m256i, @@ -161,6 +166,7 @@ pub unsafe fn _mm256_mask_gf2p8mul_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_gf2p8mul_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8mulb))] pub unsafe fn _mm256_maskz_gf2p8mul_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let zero = _mm256_setzero_si256().as_i8x32(); @@ -178,6 +184,7 @@ pub unsafe fn _mm256_maskz_gf2p8mul_epi8(k: __mmask32, a: __m256i, b: __m256i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_gf2p8mul_epi8) #[inline] #[target_feature(enable = "gfni")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(gf2p8mulb))] pub unsafe fn _mm_gf2p8mul_epi8(a: __m128i, b: __m128i) -> __m128i { transmute(vgf2p8mulb_128(a.as_i8x16(), b.as_i8x16())) @@ -193,6 +200,7 @@ pub unsafe fn _mm_gf2p8mul_epi8(a: __m128i, b: __m128i) -> __m128i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_gf2p8mul_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512vl")] 
+#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8mulb))] pub unsafe fn _mm_mask_gf2p8mul_epi8( src: __m128i, @@ -217,6 +225,7 @@ pub unsafe fn _mm_mask_gf2p8mul_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_gf2p8mul_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8mulb))] pub unsafe fn _mm_maskz_gf2p8mul_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let zero = _mm_setzero_si128().as_i8x16(); @@ -235,6 +244,7 @@ pub unsafe fn _mm_maskz_gf2p8mul_epi8(k: __mmask16, a: __m128i, b: __m128i) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_gf2p8affine_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_gf2p8affine_epi64_epi8(x: __m512i, a: __m512i) -> __m512i { @@ -257,6 +267,7 @@ pub unsafe fn _mm512_gf2p8affine_epi64_epi8(x: __m512i, a: __m512i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_gf2p8affine_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_gf2p8affine_epi64_epi8( @@ -284,6 +295,7 @@ pub unsafe fn _mm512_maskz_gf2p8affine_epi64_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_gf2p8affine_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] 
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_gf2p8affine_epi64_epi8( @@ -308,6 +320,7 @@ pub unsafe fn _mm512_mask_gf2p8affine_epi64_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_gf2p8affine_epi8) #[inline] #[target_feature(enable = "gfni,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_gf2p8affine_epi64_epi8(x: __m256i, a: __m256i) -> __m256i { @@ -330,6 +343,7 @@ pub unsafe fn _mm256_gf2p8affine_epi64_epi8(x: __m256i, a: __m256i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_gf2p8affine_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_gf2p8affine_epi64_epi8( @@ -357,6 +371,7 @@ pub unsafe fn _mm256_maskz_gf2p8affine_epi64_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_gf2p8affine_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_gf2p8affine_epi64_epi8( @@ -381,6 +396,7 @@ pub unsafe fn _mm256_mask_gf2p8affine_epi64_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_gf2p8affine_epi8) #[inline] #[target_feature(enable = "gfni")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(gf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn 
_mm_gf2p8affine_epi64_epi8(x: __m128i, a: __m128i) -> __m128i { @@ -403,6 +419,7 @@ pub unsafe fn _mm_gf2p8affine_epi64_epi8(x: __m128i, a: __m128i) - /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_gf2p8affine_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_gf2p8affine_epi64_epi8( @@ -430,6 +447,7 @@ pub unsafe fn _mm_maskz_gf2p8affine_epi64_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_gf2p8affine_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_gf2p8affine_epi64_epi8( @@ -456,6 +474,7 @@ pub unsafe fn _mm_mask_gf2p8affine_epi64_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_gf2p8affineinv_epi64_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_gf2p8affineinv_epi64_epi8(x: __m512i, a: __m512i) -> __m512i { @@ -480,6 +499,7 @@ pub unsafe fn _mm512_gf2p8affineinv_epi64_epi8(x: __m512i, a: __m5 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_gf2p8affineinv_epi64_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn 
_mm512_maskz_gf2p8affineinv_epi64_epi8( @@ -509,6 +529,7 @@ pub unsafe fn _mm512_maskz_gf2p8affineinv_epi64_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_gf2p8affineinv_epi64_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_gf2p8affineinv_epi64_epi8( @@ -535,6 +556,7 @@ pub unsafe fn _mm512_mask_gf2p8affineinv_epi64_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_gf2p8affineinv_epi64_epi8) #[inline] #[target_feature(enable = "gfni,avx")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_gf2p8affineinv_epi64_epi8(x: __m256i, a: __m256i) -> __m256i { @@ -559,6 +581,7 @@ pub unsafe fn _mm256_gf2p8affineinv_epi64_epi8(x: __m256i, a: __m2 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_gf2p8affineinv_epi64_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_gf2p8affineinv_epi64_epi8( @@ -588,6 +611,7 @@ pub unsafe fn _mm256_maskz_gf2p8affineinv_epi64_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_gf2p8affineinv_epi64_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn 
_mm256_mask_gf2p8affineinv_epi64_epi8( @@ -614,6 +638,7 @@ pub unsafe fn _mm256_mask_gf2p8affineinv_epi64_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_gf2p8affineinv_epi64_epi8) #[inline] #[target_feature(enable = "gfni")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(gf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_gf2p8affineinv_epi64_epi8(x: __m128i, a: __m128i) -> __m128i { @@ -638,6 +663,7 @@ pub unsafe fn _mm_gf2p8affineinv_epi64_epi8(x: __m128i, a: __m128i /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_gf2p8affineinv_epi64_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_gf2p8affineinv_epi64_epi8( @@ -667,6 +693,7 @@ pub unsafe fn _mm_maskz_gf2p8affineinv_epi64_epi8( /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_gf2p8affineinv_epi64_epi8) #[inline] #[target_feature(enable = "gfni,avx512bw,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_gf2p8affineinv_epi64_epi8( @@ -827,6 +854,7 @@ mod tests { } #[target_feature(enable = "sse2")] + #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] unsafe fn load_m128i_word(data: &[T], word_index: usize) -> __m128i { let byte_offset = word_index * 16 / size_of::(); let pointer = data.as_ptr().add(byte_offset) as *const __m128i; @@ -834,6 +862,7 @@ mod tests { } #[target_feature(enable = "avx")] + #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] unsafe fn load_m256i_word(data: &[T], 
word_index: usize) -> __m256i { let byte_offset = word_index * 32 / size_of::(); let pointer = data.as_ptr().add(byte_offset) as *const __m256i; @@ -841,6 +870,7 @@ mod tests { } #[target_feature(enable = "avx512f")] + #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] unsafe fn load_m512i_word(data: &[T], word_index: usize) -> __m512i { let byte_offset = word_index * 64 / size_of::(); let pointer = data.as_ptr().add(byte_offset) as *const i32; diff --git a/crates/core_arch/src/x86/mod.rs b/crates/core_arch/src/x86/mod.rs index c5e457ae71..1421f6b089 100644 --- a/crates/core_arch/src/x86/mod.rs +++ b/crates/core_arch/src/x86/mod.rs @@ -258,7 +258,7 @@ types! { /// /// Note that this means that an instance of `__m512i` typically just means /// a "bag of bits" which is left up to interpretation at the point of use. - #[stable(feature = "simd_avx512_types", since = "CURRENT_RUSTC_VERSION")] + #[stable(feature = "simd_avx512_types", since = "1.72.0")] pub struct __m512i(i64, i64, i64, i64, i64, i64, i64, i64); /// 512-bit wide set of sixteen `f32` types, x86-specific @@ -276,7 +276,7 @@ types! { /// Most intrinsics using `__m512` are prefixed with `_mm512_` and are /// suffixed with "ps" (or otherwise contain "ps"). Not to be confused with /// "pd" which is used for `__m512d`. - #[stable(feature = "simd_avx512_types", since = "CURRENT_RUSTC_VERSION")] + #[stable(feature = "simd_avx512_types", since = "1.72.0")] pub struct __m512( f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, @@ -297,7 +297,7 @@ types! { /// Most intrinsics using `__m512d` are prefixed with `_mm512_` and are /// suffixed with "pd" (or otherwise contain "pd"). Not to be confused with /// "ps" which is used for `__m512`. 
- #[stable(feature = "simd_avx512_types", since = "CURRENT_RUSTC_VERSION")] + #[stable(feature = "simd_avx512_types", since = "1.72.0")] pub struct __m512d(f64, f64, f64, f64, f64, f64, f64, f64); /// 128-bit wide set of eight `u16` types, x86-specific @@ -305,6 +305,7 @@ types! { /// This type is representing a 128-bit SIMD register which internally is consisted of /// eight packed `u16` instances. Its purpose is for bf16 related intrinsic /// implementations. + #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub struct __m128bh(u16, u16, u16, u16, u16, u16, u16, u16); /// 256-bit wide set of 16 `u16` types, x86-specific @@ -313,6 +314,7 @@ types! { /// representing a 256-bit SIMD register which internally is consisted of /// 16 packed `u16` instances. Its purpose is for bf16 related intrinsic /// implementations. + #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub struct __m256bh( u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16 @@ -324,6 +326,7 @@ types! { /// representing a 512-bit SIMD register which internally is consisted of /// 32 packed `u16` instances. Its purpose is for bf16 related intrinsic /// implementations. + #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub struct __m512bh( u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, @@ -334,34 +337,42 @@ types! 
{ /// The `__mmask64` type used in AVX-512 intrinsics, a 64-bit integer #[allow(non_camel_case_types)] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub type __mmask64 = u64; /// The `__mmask32` type used in AVX-512 intrinsics, a 32-bit integer #[allow(non_camel_case_types)] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub type __mmask32 = u32; /// The `__mmask16` type used in AVX-512 intrinsics, a 16-bit integer #[allow(non_camel_case_types)] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub type __mmask16 = u16; /// The `__mmask8` type used in AVX-512 intrinsics, a 8-bit integer #[allow(non_camel_case_types)] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub type __mmask8 = u8; /// The `_MM_CMPINT_ENUM` type used to specify comparison operations in AVX-512 intrinsics. #[allow(non_camel_case_types)] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub type _MM_CMPINT_ENUM = i32; /// The `MM_MANTISSA_NORM_ENUM` type used to specify mantissa normalized operations in AVX-512 intrinsics. #[allow(non_camel_case_types)] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub type _MM_MANTISSA_NORM_ENUM = i32; /// The `MM_MANTISSA_SIGN_ENUM` type used to specify mantissa signed operations in AVX-512 intrinsics. #[allow(non_camel_case_types)] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub type _MM_MANTISSA_SIGN_ENUM = i32; /// The `MM_PERM_ENUM` type used to specify shuffle operations in AVX-512 intrinsics. 
#[allow(non_camel_case_types)] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub type _MM_PERM_ENUM = i32; #[cfg(test)] @@ -370,7 +381,6 @@ mod test; pub use self::test::*; #[allow(non_camel_case_types)] -#[unstable(feature = "stdsimd_internal", issue = "none")] pub(crate) trait m128iExt: Sized { fn as_m128i(self) -> __m128i; @@ -423,7 +433,6 @@ impl m128iExt for __m128i { } #[allow(non_camel_case_types)] -#[unstable(feature = "stdsimd_internal", issue = "none")] pub(crate) trait m256iExt: Sized { fn as_m256i(self) -> __m256i; @@ -476,7 +485,6 @@ impl m256iExt for __m256i { } #[allow(non_camel_case_types)] -#[unstable(feature = "stdsimd_internal", issue = "none")] pub(crate) trait m128Ext: Sized { fn as_m128(self) -> __m128; @@ -494,7 +502,6 @@ impl m128Ext for __m128 { } #[allow(non_camel_case_types)] -#[unstable(feature = "stdsimd_internal", issue = "none")] pub(crate) trait m128dExt: Sized { fn as_m128d(self) -> __m128d; @@ -512,7 +519,6 @@ impl m128dExt for __m128d { } #[allow(non_camel_case_types)] -#[unstable(feature = "stdsimd_internal", issue = "none")] pub(crate) trait m256Ext: Sized { fn as_m256(self) -> __m256; @@ -530,7 +536,6 @@ impl m256Ext for __m256 { } #[allow(non_camel_case_types)] -#[unstable(feature = "stdsimd_internal", issue = "none")] pub(crate) trait m256dExt: Sized { fn as_m256d(self) -> __m256d; @@ -548,7 +553,6 @@ impl m256dExt for __m256d { } #[allow(non_camel_case_types)] -#[unstable(feature = "stdsimd_internal", issue = "none")] pub(crate) trait m512iExt: Sized { fn as_m512i(self) -> __m512i; @@ -601,7 +605,6 @@ impl m512iExt for __m512i { } #[allow(non_camel_case_types)] -#[unstable(feature = "stdsimd_internal", issue = "none")] pub(crate) trait m512Ext: Sized { fn as_m512(self) -> __m512; @@ -619,7 +622,6 @@ impl m512Ext for __m512 { } #[allow(non_camel_case_types)] -#[unstable(feature = "stdsimd_internal", issue = "none")] pub(crate) trait m512dExt: Sized { fn as_m512d(self) -> __m512d; @@ -637,7 +639,6 @@ impl 
m512dExt for __m512d { } #[allow(non_camel_case_types)] -#[unstable(feature = "stdsimd_internal", issue = "none")] pub(crate) trait m128bhExt: Sized { fn as_m128bh(self) -> __m128bh; @@ -670,7 +671,6 @@ impl m128bhExt for __m128bh { } #[allow(non_camel_case_types)] -#[unstable(feature = "stdsimd_internal", issue = "none")] pub(crate) trait m256bhExt: Sized { fn as_m256bh(self) -> __m256bh; @@ -703,7 +703,6 @@ impl m256bhExt for __m256bh { } #[allow(non_camel_case_types)] -#[unstable(feature = "stdsimd_internal", issue = "none")] pub(crate) trait m512bhExt: Sized { fn as_m512bh(self) -> __m512bh; @@ -736,121 +735,162 @@ impl m512bhExt for __m512bh { } mod eflags; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::eflags::*; mod fxsr; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::fxsr::*; mod bswap; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::bswap::*; mod rdtsc; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::rdtsc::*; mod cpuid; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::cpuid::*; mod xsave; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::xsave::*; mod sse; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::sse::*; mod sse2; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::sse2::*; mod sse3; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::sse3::*; mod ssse3; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::ssse3::*; mod sse41; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::sse41::*; mod sse42; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::sse42::*; mod avx; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::avx::*; mod avx2; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::avx2::*; mod fma; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::fma::*; mod abm; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use 
self::abm::*; mod bmi1; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::bmi1::*; mod bmi2; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::bmi2::*; #[cfg(not(stdarch_intel_sde))] mod sse4a; #[cfg(not(stdarch_intel_sde))] +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::sse4a::*; #[cfg(not(stdarch_intel_sde))] mod tbm; #[cfg(not(stdarch_intel_sde))] +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::tbm::*; mod pclmulqdq; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::pclmulqdq::*; mod aes; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::aes::*; mod rdrand; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::rdrand::*; mod sha; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::sha::*; mod adx; +#[stable(feature = "simd_x86_adx", since = "1.33.0")] pub use self::adx::*; #[cfg(test)] use stdarch_test::assert_instr; mod avx512f; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub use self::avx512f::*; mod avx512bw; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub use self::avx512bw::*; mod avx512cd; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub use self::avx512cd::*; mod avx512ifma; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub use self::avx512ifma::*; mod avx512vbmi; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub use self::avx512vbmi::*; mod avx512vbmi2; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub use self::avx512vbmi2::*; mod avx512vnni; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub use self::avx512vnni::*; mod avx512bitalg; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub use self::avx512bitalg::*; mod gfni; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub use self::gfni::*; mod avx512vpopcntdq; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub use 
self::avx512vpopcntdq::*; mod vaes; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub use self::vaes::*; mod vpclmulqdq; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub use self::vpclmulqdq::*; mod bt; +#[stable(feature = "simd_x86_bittest", since = "1.55.0")] pub use self::bt::*; mod rtm; +#[unstable(feature = "stdarch_x86_rtm", issue = "111138")] pub use self::rtm::*; mod f16c; +#[stable(feature = "x86_f16c_intrinsics", since = "1.68.0")] pub use self::f16c::*; mod avx512bf16; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub use self::avx512bf16::*; diff --git a/crates/core_arch/src/x86/rtm.rs b/crates/core_arch/src/x86/rtm.rs index ea1e80057d..ad9f8d2ad2 100644 --- a/crates/core_arch/src/x86/rtm.rs +++ b/crates/core_arch/src/x86/rtm.rs @@ -28,26 +28,33 @@ extern "C" { } /// Transaction successfully started. +#[unstable(feature = "stdarch_x86_rtm", issue = "111138")] pub const _XBEGIN_STARTED: u32 = !0; /// Transaction explicitly aborted with xabort. The parameter passed to xabort is available with /// `_xabort_code(status)`. #[allow(clippy::identity_op)] +#[unstable(feature = "stdarch_x86_rtm", issue = "111138")] pub const _XABORT_EXPLICIT: u32 = 1 << 0; /// Transaction retry is possible. +#[unstable(feature = "stdarch_x86_rtm", issue = "111138")] pub const _XABORT_RETRY: u32 = 1 << 1; /// Transaction abort due to a memory conflict with another thread. +#[unstable(feature = "stdarch_x86_rtm", issue = "111138")] pub const _XABORT_CONFLICT: u32 = 1 << 2; /// Transaction abort due to the transaction using too much memory. +#[unstable(feature = "stdarch_x86_rtm", issue = "111138")] pub const _XABORT_CAPACITY: u32 = 1 << 3; /// Transaction abort due to a debug trap. +#[unstable(feature = "stdarch_x86_rtm", issue = "111138")] pub const _XABORT_DEBUG: u32 = 1 << 4; /// Transaction abort in a inner nested transaction. 
+#[unstable(feature = "stdarch_x86_rtm", issue = "111138")] pub const _XABORT_NESTED: u32 = 1 << 5; /// Specifies the start of a restricted transactional memory (RTM) code region and returns a value @@ -57,6 +64,7 @@ pub const _XABORT_NESTED: u32 = 1 << 5; #[inline] #[target_feature(enable = "rtm")] #[cfg_attr(test, assert_instr(xbegin))] +#[unstable(feature = "stdarch_x86_rtm", issue = "111138")] pub unsafe fn _xbegin() -> u32 { x86_xbegin() as _ } @@ -67,6 +75,7 @@ pub unsafe fn _xbegin() -> u32 { #[inline] #[target_feature(enable = "rtm")] #[cfg_attr(test, assert_instr(xend))] +#[unstable(feature = "stdarch_x86_rtm", issue = "111138")] pub unsafe fn _xend() { x86_xend() } @@ -78,6 +87,7 @@ pub unsafe fn _xend() { #[target_feature(enable = "rtm")] #[cfg_attr(test, assert_instr(xabort, IMM8 = 0x0))] #[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_x86_rtm", issue = "111138")] pub unsafe fn _xabort() { static_assert_uimm_bits!(IMM8, 8); x86_xabort(IMM8 as i8) @@ -90,6 +100,7 @@ pub unsafe fn _xabort() { #[inline] #[target_feature(enable = "rtm")] #[cfg_attr(test, assert_instr(xtest))] +#[unstable(feature = "stdarch_x86_rtm", issue = "111138")] pub unsafe fn _xtest() -> u8 { x86_xtest() as _ } @@ -97,6 +108,7 @@ pub unsafe fn _xtest() -> u8 { /// Retrieves the parameter passed to [`_xabort`] when [`_xbegin`]'s status has the /// `_XABORT_EXPLICIT` flag set. #[inline] +#[unstable(feature = "stdarch_x86_rtm", issue = "111138")] pub const fn _xabort_code(status: u32) -> u32 { (status >> 24) & 0xFF } diff --git a/crates/core_arch/src/x86/sse.rs b/crates/core_arch/src/x86/sse.rs index 6a2be09216..e3dc8c535b 100644 --- a/crates/core_arch/src/x86/sse.rs +++ b/crates/core_arch/src/x86/sse.rs @@ -984,7 +984,7 @@ pub unsafe fn _mm_setzero_ps() -> __m128 { /// permute intrinsics. 
#[inline] #[allow(non_snake_case)] -#[unstable(feature = "stdarch", issue = "27731")] +#[unstable(feature = "stdarch_x86_mm_shuffle", issue = "111147")] pub const fn _MM_SHUFFLE(z: u32, y: u32, x: u32, w: u32) -> i32 { ((z << 6) | (y << 4) | (x << 2) | w) as i32 } diff --git a/crates/core_arch/src/x86/vaes.rs b/crates/core_arch/src/x86/vaes.rs index dc24ae0254..2c3cead874 100644 --- a/crates/core_arch/src/x86/vaes.rs +++ b/crates/core_arch/src/x86/vaes.rs @@ -39,6 +39,7 @@ extern "C" { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_aesenc_epi128) #[inline] #[target_feature(enable = "vaes")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaesenc))] pub unsafe fn _mm256_aesenc_epi128(a: __m256i, round_key: __m256i) -> __m256i { aesenc_256(a, round_key) @@ -50,6 +51,7 @@ pub unsafe fn _mm256_aesenc_epi128(a: __m256i, round_key: __m256i) -> __m256i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_aesenclast_epi128) #[inline] #[target_feature(enable = "vaes")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaesenclast))] pub unsafe fn _mm256_aesenclast_epi128(a: __m256i, round_key: __m256i) -> __m256i { aesenclast_256(a, round_key) @@ -61,6 +63,7 @@ pub unsafe fn _mm256_aesenclast_epi128(a: __m256i, round_key: __m256i) -> __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_aesdec_epi128) #[inline] #[target_feature(enable = "vaes")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaesdec))] pub unsafe fn _mm256_aesdec_epi128(a: __m256i, round_key: __m256i) -> __m256i { aesdec_256(a, round_key) @@ -72,6 +75,7 @@ pub unsafe fn _mm256_aesdec_epi128(a: __m256i, round_key: __m256i) -> __m256i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_aesdeclast_epi128) #[inline] #[target_feature(enable = "vaes")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaesdeclast))] pub unsafe fn _mm256_aesdeclast_epi128(a: __m256i, round_key: __m256i) -> __m256i { aesdeclast_256(a, round_key) @@ -83,6 +87,7 @@ pub unsafe fn _mm256_aesdeclast_epi128(a: __m256i, round_key: __m256i) -> __m256 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_aesenc_epi128) #[inline] #[target_feature(enable = "vaes,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaesenc))] pub unsafe fn _mm512_aesenc_epi128(a: __m512i, round_key: __m512i) -> __m512i { aesenc_512(a, round_key) @@ -94,6 +99,7 @@ pub unsafe fn _mm512_aesenc_epi128(a: __m512i, round_key: __m512i) -> __m512i { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_aesenclast_epi128) #[inline] #[target_feature(enable = "vaes,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaesenclast))] pub unsafe fn _mm512_aesenclast_epi128(a: __m512i, round_key: __m512i) -> __m512i { aesenclast_512(a, round_key) @@ -105,6 +111,7 @@ pub unsafe fn _mm512_aesenclast_epi128(a: __m512i, round_key: __m512i) -> __m512 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_aesdec_epi128) #[inline] #[target_feature(enable = "vaes,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaesdec))] pub unsafe fn _mm512_aesdec_epi128(a: __m512i, round_key: __m512i) -> __m512i { aesdec_512(a, round_key) @@ -116,6 +123,7 @@ pub unsafe fn _mm512_aesdec_epi128(a: __m512i, round_key: __m512i) -> __m512i { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_aesdeclast_epi128) #[inline] #[target_feature(enable = "vaes,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vaesdeclast))] pub unsafe fn _mm512_aesdeclast_epi128(a: __m512i, round_key: __m512i) -> __m512i { aesdeclast_512(a, round_key) diff --git a/crates/core_arch/src/x86/vpclmulqdq.rs b/crates/core_arch/src/x86/vpclmulqdq.rs index 269eda1d93..37bbd502e5 100644 --- a/crates/core_arch/src/x86/vpclmulqdq.rs +++ b/crates/core_arch/src/x86/vpclmulqdq.rs @@ -33,6 +33,7 @@ extern "C" { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_clmulepi64_epi128) #[inline] #[target_feature(enable = "vpclmulqdq,avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] // technically according to Intel's documentation we don't need avx512f here, however LLVM gets confused otherwise #[cfg_attr(test, assert_instr(vpclmul, IMM8 = 0))] #[rustc_legacy_const_generics(2)] @@ -51,6 +52,7 @@ pub unsafe fn _mm512_clmulepi64_epi128(a: __m512i, b: __m512i) /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_clmulepi64_epi128) #[inline] #[target_feature(enable = "vpclmulqdq")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpclmul, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_clmulepi64_epi128(a: __m256i, b: __m256i) -> __m256i { diff --git a/crates/core_arch/src/x86_64/avx512f.rs b/crates/core_arch/src/x86_64/avx512f.rs index bace11d13f..2c1a780f42 100644 --- a/crates/core_arch/src/x86_64/avx512f.rs +++ b/crates/core_arch/src/x86_64/avx512f.rs @@ -11,6 +11,7 @@ use stdarch_test::assert_instr; /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_i64&expand=1792) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2si))] pub unsafe fn _mm_cvtsd_i64(a: __m128d) -> i64 { _mm_cvtsd_si64(a) @@ -21,6 +22,7 @@ pub unsafe fn _mm_cvtsd_i64(a: __m128d) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_i64&expand=1894) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2si))] pub unsafe fn _mm_cvtss_i64(a: __m128) -> i64 { _mm_cvtss_si64(a) @@ -31,6 +33,7 @@ pub unsafe fn _mm_cvtss_i64(a: __m128) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_u64&expand=1902) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2usi))] pub unsafe fn _mm_cvtss_u64(a: __m128) -> u64 { vcvtss2usi64(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION) @@ -41,6 +44,7 @@ pub unsafe fn _mm_cvtss_u64(a: __m128) -> u64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_u64&expand=1800) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2usi))] pub unsafe fn _mm_cvtsd_u64(a: __m128d) -> u64 { vcvtsd2usi64(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION) @@ -51,6 +55,7 @@ pub unsafe fn _mm_cvtsd_u64(a: __m128d) -> u64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvti32_ss&expand=1643) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsi2ss))] pub unsafe fn _mm_cvti64_ss(a: __m128, b: i64) -> __m128 { let b = b as f32; @@ -62,6 +67,7 @@ pub 
unsafe fn _mm_cvti64_ss(a: __m128, b: i64) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvti64_sd&expand=1644) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsi2sd))] pub unsafe fn _mm_cvti64_sd(a: __m128d, b: i64) -> __m128d { let b = b as f64; @@ -73,6 +79,7 @@ pub unsafe fn _mm_cvti64_sd(a: __m128d, b: i64) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtu64_ss&expand=2035) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtusi2ss))] pub unsafe fn _mm_cvtu64_ss(a: __m128, b: u64) -> __m128 { let b = b as f32; @@ -84,6 +91,7 @@ pub unsafe fn _mm_cvtu64_ss(a: __m128, b: u64) -> __m128 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtu64_sd&expand=2034) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtusi2sd))] pub unsafe fn _mm_cvtu64_sd(a: __m128d, b: u64) -> __m128d { let b = b as f64; @@ -95,6 +103,7 @@ pub unsafe fn _mm_cvtu64_sd(a: __m128d, b: u64) -> __m128d { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_i64&expand=2016) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2si))] pub unsafe fn _mm_cvttsd_i64(a: __m128d) -> i64 { vcvtsd2si64(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION) @@ -105,6 +114,7 @@ pub unsafe fn _mm_cvttsd_i64(a: __m128d) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_u64&expand=2021) #[inline] 
#[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2usi))] pub unsafe fn _mm_cvttsd_u64(a: __m128d) -> u64 { vcvtsd2usi64(a.as_f64x2(), _MM_FROUND_CUR_DIRECTION) @@ -115,6 +125,7 @@ pub unsafe fn _mm_cvttsd_u64(a: __m128d) -> u64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=#text=_mm_cvttss_i64&expand=2023) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2si))] pub unsafe fn _mm_cvttss_i64(a: __m128) -> i64 { vcvtss2si64(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION) @@ -125,6 +136,7 @@ pub unsafe fn _mm_cvttss_i64(a: __m128) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_u64&expand=2027) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2usi))] pub unsafe fn _mm_cvttss_u64(a: __m128) -> u64 { vcvtss2usi64(a.as_f32x4(), _MM_FROUND_CUR_DIRECTION) @@ -141,6 +153,7 @@ pub unsafe fn _mm_cvttss_u64(a: __m128) -> u64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundi64_sd&expand=1313) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsi2sd, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_cvt_roundi64_sd(a: __m128d, b: i64) -> __m128d { @@ -161,6 +174,7 @@ pub unsafe fn _mm_cvt_roundi64_sd(a: __m128d, b: i64) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundsi64_sd&expand=1367) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, 
assert_instr(vcvtsi2sd, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_cvt_roundsi64_sd(a: __m128d, b: i64) -> __m128d { @@ -181,6 +195,7 @@ pub unsafe fn _mm_cvt_roundsi64_sd(a: __m128d, b: i64) -> _ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundi64_ss&expand=1314) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsi2ss, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_cvt_roundi64_ss(a: __m128, b: i64) -> __m128 { @@ -201,6 +216,7 @@ pub unsafe fn _mm_cvt_roundi64_ss(a: __m128, b: i64) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundu64_sd&expand=1379) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtusi2sd, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_cvt_roundu64_sd(a: __m128d, b: u64) -> __m128d { @@ -221,6 +237,7 @@ pub unsafe fn _mm_cvt_roundu64_sd(a: __m128d, b: u64) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundsi64_ss&expand=1368) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsi2ss, ROUNDING = 8))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_cvt_roundsi64_ss(a: __m128, b: i64) -> __m128 { @@ -241,6 +258,7 @@ pub unsafe fn _mm_cvt_roundsi64_ss(a: __m128, b: i64) -> __ /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundu64_ss&expand=1380) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtusi2ss, ROUNDING = 8))] 
#[rustc_legacy_const_generics(2)] pub unsafe fn _mm_cvt_roundu64_ss(a: __m128, b: u64) -> __m128 { @@ -261,6 +279,7 @@ pub unsafe fn _mm_cvt_roundu64_ss(a: __m128, b: u64) -> __m /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundsd_si64&expand=1360) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2si, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvt_roundsd_si64(a: __m128d) -> i64 { @@ -280,6 +299,7 @@ pub unsafe fn _mm_cvt_roundsd_si64(a: __m128d) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundsd_i64&expand=1358) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2si, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvt_roundsd_i64(a: __m128d) -> i64 { @@ -299,6 +319,7 @@ pub unsafe fn _mm_cvt_roundsd_i64(a: __m128d) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundsd_u64&expand=1365) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2usi, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvt_roundsd_u64(a: __m128d) -> u64 { @@ -318,6 +339,7 @@ pub unsafe fn _mm_cvt_roundsd_u64(a: __m128d) -> u64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundss_si64&expand=1375) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2si, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvt_roundss_si64(a: __m128) -> i64 { @@ 
-337,6 +359,7 @@ pub unsafe fn _mm_cvt_roundss_si64(a: __m128) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundss_i64&expand=1370) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2si, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvt_roundss_i64(a: __m128) -> i64 { @@ -356,6 +379,7 @@ pub unsafe fn _mm_cvt_roundss_i64(a: __m128) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundss_u64&expand=1377) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2usi, ROUNDING = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvt_roundss_u64(a: __m128) -> u64 { @@ -370,6 +394,7 @@ pub unsafe fn _mm_cvt_roundss_u64(a: __m128) -> u64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_roundsd_si64&expand=1931) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2si, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvtt_roundsd_si64(a: __m128d) -> i64 { @@ -384,6 +409,7 @@ pub unsafe fn _mm_cvtt_roundsd_si64(a: __m128d) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_roundsd_i64&expand=1929) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2si, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvtt_roundsd_i64(a: __m128d) -> i64 { @@ -398,6 +424,7 @@ pub unsafe fn _mm_cvtt_roundsd_i64(a: __m128d) -> i64 { /// [Intel's 
documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_roundsd_u64&expand=1933) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtsd2usi, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvtt_roundsd_u64(a: __m128d) -> u64 { @@ -412,6 +439,7 @@ pub unsafe fn _mm_cvtt_roundsd_u64(a: __m128d) -> u64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_roundss_i64&expand=1935) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2si, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvtt_roundss_i64(a: __m128) -> i64 { @@ -426,6 +454,7 @@ pub unsafe fn _mm_cvtt_roundss_i64(a: __m128) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_roundss_si64&expand=1937) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2si, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvtt_roundss_si64(a: __m128) -> i64 { @@ -440,6 +469,7 @@ pub unsafe fn _mm_cvtt_roundss_si64(a: __m128) -> i64 { /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_roundss_u64&expand=1939) #[inline] #[target_feature(enable = "avx512f")] +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vcvtss2usi, SAE = 8))] #[rustc_legacy_const_generics(1)] pub unsafe fn _mm_cvtt_roundss_u64(a: __m128) -> u64 { diff --git a/crates/core_arch/src/x86_64/mod.rs b/crates/core_arch/src/x86_64/mod.rs index 461874ece0..708dc90823 100644 --- a/crates/core_arch/src/x86_64/mod.rs +++ b/crates/core_arch/src/x86_64/mod.rs @@ -4,52 
+4,68 @@ mod macros; mod fxsr; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::fxsr::*; mod sse; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::sse::*; mod sse2; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::sse2::*; mod sse41; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::sse41::*; mod sse42; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::sse42::*; mod xsave; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::xsave::*; mod abm; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::abm::*; mod avx; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::avx::*; mod bmi; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::bmi::*; - mod bmi2; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::bmi2::*; mod avx2; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::avx2::*; mod avx512f; +#[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub use self::avx512f::*; mod bswap; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::bswap::*; mod rdrand; +#[stable(feature = "simd_x86", since = "1.27.0")] pub use self::rdrand::*; mod cmpxchg16b; +#[stable(feature = "cmpxchg16b_intrinsic", since = "1.67.0")] pub use self::cmpxchg16b::*; mod adx; +#[stable(feature = "simd_x86_adx", since = "1.33.0")] pub use self::adx::*; mod bt; +#[stable(feature = "simd_x86_bittest", since = "1.55.0")] pub use self::bt::*; diff --git a/crates/intrinsic-test/src/main.rs b/crates/intrinsic-test/src/main.rs index 15bc021c75..3c181584be 100644 --- a/crates/intrinsic-test/src/main.rs +++ b/crates/intrinsic-test/src/main.rs @@ -166,7 +166,14 @@ fn generate_rust_program(notices: &str, intrinsic: &Intrinsic, a32: bool) -> Str format!( r#"{notices}#![feature(simd_ffi)] #![feature(link_llvm_intrinsics)] -#![feature(stdsimd)] +#![cfg_attr(target_arch = "arm", feature(stdarch_arm_neon_intrinsics))] 
+#![feature(stdarch_arm_crc32)] +#![cfg_attr(target_arch = "aarch64", feature(stdarch_neon_fcma))] +#![cfg_attr(target_arch = "aarch64", feature(stdarch_neon_dotprod))] +#![cfg_attr(target_arch = "aarch64", feature(stdarch_neon_i8mm))] +#![cfg_attr(target_arch = "aarch64", feature(stdarch_neon_sha3))] +#![cfg_attr(target_arch = "aarch64", feature(stdarch_neon_sm4))] +#![cfg_attr(target_arch = "aarch64", feature(stdarch_neon_ftts))] #![allow(overflowing_literals)] #![allow(non_upper_case_globals)] use core_arch::arch::{target_arch}::*; diff --git a/crates/std_detect/README.md b/crates/std_detect/README.md index 5211771047..3611daaf40 100644 --- a/crates/std_detect/README.md +++ b/crates/std_detect/README.md @@ -9,7 +9,7 @@ supports certain features, like SIMD instructions. `std::detect` APIs are available as part of `libstd`. Prefer using it via the standard library than through this crate. Unstable features of `std::detect` are -available on nightly Rust behind the `feature(stdsimd)` feature-gate. +available on nightly Rust behind various feature-gates. If you need run-time feature detection in `#[no_std]` environments, Rust `core` library cannot help you. By design, Rust `core` is platform independent, but diff --git a/crates/std_detect/src/detect/arch/arm.rs b/crates/std_detect/src/detect/arch/arm.rs index fd332e0b2c..50e77fde59 100644 --- a/crates/std_detect/src/detect/arch/arm.rs +++ b/crates/std_detect/src/detect/arch/arm.rs @@ -6,23 +6,23 @@ features! { @MACRO_NAME: is_arm_feature_detected; @MACRO_ATTRS: /// Checks if `arm` feature is enabled. 
- #[unstable(feature = "stdsimd", issue = "27731")] + #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] @NO_RUNTIME_DETECTION: "v7"; @NO_RUNTIME_DETECTION: "vfp2"; @NO_RUNTIME_DETECTION: "vfp3"; @NO_RUNTIME_DETECTION: "vfp4"; - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] neon: "neon"; + @FEATURE: #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] neon: "neon"; /// ARM Advanced SIMD (NEON) - Aarch32 - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] pmull: "pmull"; + @FEATURE: #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] pmull: "pmull"; /// Polynomial Multiply - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] crc: "crc"; + @FEATURE: #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] crc: "crc"; /// CRC32 (Cyclic Redundancy Check) - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] aes: "aes"; + @FEATURE: #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] aes: "aes"; /// FEAT_AES (AES instructions) - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] sha2: "sha2"; + @FEATURE: #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] sha2: "sha2"; /// FEAT_SHA1 & FEAT_SHA256 (SHA1 & SHA2-256 instructions) - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] i8mm: "i8mm"; + @FEATURE: #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] i8mm: "i8mm"; /// FEAT_I8MM (integer matrix multiplication, plus ASIMD support) - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] dotprod: "dotprod"; + @FEATURE: #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] dotprod: "dotprod"; /// FEAT_DotProd (Vector Dot-Product - ASIMDDP) } diff --git a/crates/std_detect/src/detect/arch/mips.rs b/crates/std_detect/src/detect/arch/mips.rs index ae27d0093c..e185fdfcaa 100644 --- a/crates/std_detect/src/detect/arch/mips.rs +++ 
b/crates/std_detect/src/detect/arch/mips.rs @@ -6,7 +6,7 @@ features! { @MACRO_NAME: is_mips_feature_detected; @MACRO_ATTRS: /// Checks if `mips` feature is enabled. - #[unstable(feature = "stdsimd", issue = "27731")] - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] msa: "msa"; + #[unstable(feature = "stdarch_mips_feature_detection", issue = "111188")] + @FEATURE: #[unstable(feature = "stdarch_mips_feature_detection", issue = "111188")] msa: "msa"; /// MIPS SIMD Architecture (MSA) } diff --git a/crates/std_detect/src/detect/arch/mips64.rs b/crates/std_detect/src/detect/arch/mips64.rs index 7182ec2da4..69fe4869d3 100644 --- a/crates/std_detect/src/detect/arch/mips64.rs +++ b/crates/std_detect/src/detect/arch/mips64.rs @@ -6,7 +6,7 @@ features! { @MACRO_NAME: is_mips64_feature_detected; @MACRO_ATTRS: /// Checks if `mips64` feature is enabled. - #[unstable(feature = "stdsimd", issue = "27731")] - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] msa: "msa"; + #[unstable(feature = "stdarch_mips_feature_detection", issue = "111188")] + @FEATURE: #[unstable(feature = "stdarch_mips_feature_detection", issue = "111188")] msa: "msa"; /// MIPS SIMD Architecture (MSA) } diff --git a/crates/std_detect/src/detect/arch/mod.rs b/crates/std_detect/src/detect/arch/mod.rs index 81a1f23e87..af8222dc2b 100644 --- a/crates/std_detect/src/detect/arch/mod.rs +++ b/crates/std_detect/src/detect/arch/mod.rs @@ -22,20 +22,28 @@ mod mips64; cfg_if! 
{ if #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] { + #[stable(feature = "simd_x86", since = "1.27.0")] pub use x86::*; } else if #[cfg(target_arch = "arm")] { + #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] pub use arm::*; } else if #[cfg(target_arch = "aarch64")] { + #[stable(feature = "simd_aarch64", since = "1.60.0")] pub use aarch64::*; } else if #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] { + #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] pub use riscv::*; } else if #[cfg(target_arch = "powerpc")] { + #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] pub use powerpc::*; } else if #[cfg(target_arch = "powerpc64")] { + #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] pub use powerpc64::*; } else if #[cfg(target_arch = "mips")] { + #[unstable(feature = "stdarch_mips_feature_detection", issue = "111188")] pub use mips::*; } else if #[cfg(target_arch = "mips64")] { + #[unstable(feature = "stdarch_mips_feature_detection", issue = "111188")] pub use mips64::*; } else { // Unimplemented architecture: @@ -44,6 +52,7 @@ cfg_if! { Null } #[doc(hidden)] + #[unstable(feature = "stdarch_internal", issue = "none")] pub mod __is_feature_detected {} impl Feature { diff --git a/crates/std_detect/src/detect/arch/powerpc.rs b/crates/std_detect/src/detect/arch/powerpc.rs index d135cd95de..fc2ac8963f 100644 --- a/crates/std_detect/src/detect/arch/powerpc.rs +++ b/crates/std_detect/src/detect/arch/powerpc.rs @@ -6,11 +6,11 @@ features! { @MACRO_NAME: is_powerpc_feature_detected; @MACRO_ATTRS: /// Checks if `powerpc` feature is enabled. 
- #[unstable(feature = "stdsimd", issue = "27731")] - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] altivec: "altivec"; + #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] + @FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] altivec: "altivec"; /// Altivec - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] vsx: "vsx"; + @FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] vsx: "vsx"; /// VSX - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] power8: "power8"; + @FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] power8: "power8"; /// Power8 } diff --git a/crates/std_detect/src/detect/arch/powerpc64.rs b/crates/std_detect/src/detect/arch/powerpc64.rs index 773afd6ceb..579bdc50ca 100644 --- a/crates/std_detect/src/detect/arch/powerpc64.rs +++ b/crates/std_detect/src/detect/arch/powerpc64.rs @@ -6,11 +6,11 @@ features! { @MACRO_NAME: is_powerpc64_feature_detected; @MACRO_ATTRS: /// Checks if `powerpc` feature is enabled. 
- #[unstable(feature = "stdsimd", issue = "27731")] - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] altivec: "altivec"; + #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] + @FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] altivec: "altivec"; /// Altivec - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] vsx: "vsx"; + @FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] vsx: "vsx"; /// VSX - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] power8: "power8"; + @FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] power8: "power8"; /// Power8 } diff --git a/crates/std_detect/src/detect/arch/riscv.rs b/crates/std_detect/src/detect/arch/riscv.rs index 5ea36e7c1c..1d2d32a014 100644 --- a/crates/std_detect/src/detect/arch/riscv.rs +++ b/crates/std_detect/src/detect/arch/riscv.rs @@ -99,108 +99,108 @@ features! { /// * Zkt: `"zkt"` /// /// [ISA manual]: https://github.com/riscv/riscv-isa-manual/ - #[unstable(feature = "stdsimd", issue = "27731")] - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] rv32i: "rv32i"; + #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] rv32i: "rv32i"; /// RV32I Base Integer Instruction Set - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zifencei: "zifencei"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zifencei: "zifencei"; /// "Zifencei" Instruction-Fetch Fence - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zihintpause: "zihintpause"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zihintpause: "zihintpause"; /// "Zihintpause" Pause Hint - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] rv64i: "rv64i"; + @FEATURE: #[unstable(feature = 
"stdarch_riscv_feature_detection", issue = "111192")] rv64i: "rv64i"; /// RV64I Base Integer Instruction Set - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] m: "m"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] m: "m"; /// "M" Standard Extension for Integer Multiplication and Division - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] a: "a"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] a: "a"; /// "A" Standard Extension for Atomic Instructions - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zicsr: "zicsr"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zicsr: "zicsr"; /// "Zicsr", Control and Status Register (CSR) Instructions - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zicntr: "zicntr"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zicntr: "zicntr"; /// "Zicntr", Standard Extension for Base Counters and Timers - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zihpm: "zihpm"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zihpm: "zihpm"; /// "Zihpm", Standard Extension for Hardware Performance Counters - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] f: "f"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] f: "f"; /// "F" Standard Extension for Single-Precision Floating-Point - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] d: "d"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] d: "d"; /// "D" Standard Extension for Double-Precision Floating-Point - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] q: "q"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] q: "q"; /// "Q" Standard Extension for Quad-Precision Floating-Point - @FEATURE: 
#[unstable(feature = "stdsimd", issue = "27731")] c: "c"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] c: "c"; /// "C" Standard Extension for Compressed Instructions - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zfinx: "zfinx"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zfinx: "zfinx"; /// "Zfinx" Standard Extension for Single-Precision Floating-Point in Integer Registers - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zdinx: "zdinx"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zdinx: "zdinx"; /// "Zdinx" Standard Extension for Double-Precision Floating-Point in Integer Registers - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zhinx: "zhinx"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zhinx: "zhinx"; /// "Zhinx" Standard Extension for Half-Precision Floating-Point in Integer Registers - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zhinxmin: "zhinxmin"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zhinxmin: "zhinxmin"; /// "Zhinxmin" Standard Extension for Minimal Half-Precision Floating-Point in Integer Registers - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] ztso: "ztso"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] ztso: "ztso"; /// "Ztso" Standard Extension for Total Store Ordering - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] rv32e: "rv32e"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] rv32e: "rv32e"; /// RV32E Base Integer Instruction Set - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] rv128i: "rv128i"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] rv128i: "rv128i"; /// RV128I Base Integer Instruction Set - @FEATURE: 
#[unstable(feature = "stdsimd", issue = "27731")] zfh: "zfh"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zfh: "zfh"; /// "Zfh" Standard Extension for 16-Bit Half-Precision Floating-Point - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zfhmin: "zfhmin"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zfhmin: "zfhmin"; /// "Zfhmin" Standard Extension for Minimal Half-Precision Floating-Point Support - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] b: "b"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] b: "b"; /// "B" Standard Extension for Bit Manipulation - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] j: "j"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] j: "j"; /// "J" Standard Extension for Dynamically Translated Languages - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] p: "p"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] p: "p"; /// "P" Standard Extension for Packed-SIMD Instructions - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] v: "v"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] v: "v"; /// "V" Standard Extension for Vector Operations - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zam: "zam"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zam: "zam"; /// "Zam" Standard Extension for Misaligned Atomics - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] s: "s"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] s: "s"; /// Supervisor-Level ISA - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] svnapot: "svnapot"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] svnapot: "svnapot"; /// "Svnapot" 
Standard Extension for NAPOT Translation Contiguity - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] svpbmt: "svpbmt"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] svpbmt: "svpbmt"; /// "Svpbmt" Standard Extension for Page-Based Memory Types - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] svinval: "svinval"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] svinval: "svinval"; /// "Svinval" Standard Extension for Fine-Grained Address-Translation Cache Invalidation - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] h: "h"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] h: "h"; /// Hypervisor Extension - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zba: "zba"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zba: "zba"; /// "Zba" Standard Extension for Address Generation Instructions - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zbb: "zbb"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zbb: "zbb"; /// "Zbb" Standard Extension for Basic Bit-Manipulation - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zbc: "zbc"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zbc: "zbc"; /// "Zbc" Standard Extension for Carry-less Multiplication - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zbs: "zbs"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zbs: "zbs"; /// "Zbs" Standard Extension for Single-Bit instructions - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zbkb: "zbkb"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zbkb: "zbkb"; /// "Zbkb" Standard Extension for Bitmanip instructions for Cryptography - @FEATURE: #[unstable(feature = "stdsimd", issue = 
"27731")] zbkc: "zbkc"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zbkc: "zbkc"; /// "Zbkc" Standard Extension for Carry-less multiply instructions - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zbkx: "zbkx"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zbkx: "zbkx"; /// "Zbkx" Standard Extension for Crossbar permutation instructions - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zknd: "zknd"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zknd: "zknd"; /// "Zknd" Standard Extension for NIST Suite: AES Decryption - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zkne: "zkne"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zkne: "zkne"; /// "Zkne" Standard Extension for NIST Suite: AES Encryption - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zknh: "zknh"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zknh: "zknh"; /// "Zknh" Standard Extension for NIST Suite: Hash Function Instructions - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zksed: "zksed"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zksed: "zksed"; /// "Zksed" Standard Extension for ShangMi Suite: SM4 Block Cipher Instructions - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zksh: "zksh"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zksh: "zksh"; /// "Zksh" Standard Extension for ShangMi Suite: SM3 Hash Function Instructions - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zkr: "zkr"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zkr: "zkr"; /// "Zkr" Standard Extension for Entropy Source Extension - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zkn: "zkn"; + @FEATURE: 
#[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zkn: "zkn"; /// "Zkn" Standard Extension for NIST Algorithm Suite - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zks: "zks"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zks: "zks"; /// "Zks" Standard Extension for ShangMi Algorithm Suite - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zk: "zk"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zk: "zk"; /// "Zk" Standard Extension for Standard scalar cryptography extension - @FEATURE: #[unstable(feature = "stdsimd", issue = "27731")] zkt: "zkt"; + @FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zkt: "zkt"; /// "Zkt" Standard Extension for Data Independent Execution Latency } diff --git a/crates/std_detect/src/detect/macros.rs b/crates/std_detect/src/detect/macros.rs index 45feec79fc..5321ef2252 100644 --- a/crates/std_detect/src/detect/macros.rs +++ b/crates/std_detect/src/detect/macros.rs @@ -1,5 +1,6 @@ #[macro_export] -#[allow_internal_unstable(stdsimd)] +#[allow_internal_unstable(stdarch_internal)] +#[unstable(feature = "stdarch_internal", issue = "none")] macro_rules! detect_feature { ($feature:tt, $feature_lit:tt) => { $crate::detect_feature!($feature, $feature_lit : $feature_lit) @@ -25,7 +26,7 @@ macro_rules! features { ) => { #[macro_export] $(#[$macro_attrs])* - #[allow_internal_unstable(stdsimd_internal, stdsimd)] + #[allow_internal_unstable(stdarch_internal)] #[cfg($cfg)] #[doc(cfg($cfg))] macro_rules! $macro_name { @@ -120,7 +121,7 @@ macro_rules! features { #[allow(non_camel_case_types)] #[derive(Copy, Clone)] #[repr(u8)] - #[unstable(feature = "stdsimd_internal", issue = "none")] + #[unstable(feature = "stdarch_internal", issue = "none")] #[cfg($cfg)] pub(crate) enum Feature { $( @@ -157,6 +158,7 @@ macro_rules! features { /// to change. 
#[doc(hidden)] #[cfg($cfg)] + #[unstable(feature = "stdarch_internal", issue = "none")] pub mod __is_feature_detected { $( diff --git a/crates/std_detect/src/detect/mod.rs b/crates/std_detect/src/detect/mod.rs index db7018232d..75a2f70db8 100644 --- a/crates/std_detect/src/detect/mod.rs +++ b/crates/std_detect/src/detect/mod.rs @@ -27,6 +27,7 @@ mod arch; // This module needs to be public because the `is_{arch}_feature_detected!` // macros expand calls to items within it in user crates. #[doc(hidden)] +#[unstable(feature = "stdarch_internal", issue = "none")] pub use self::arch::__is_feature_detected; pub(crate) use self::arch::Feature; @@ -81,7 +82,7 @@ fn check_for(x: Feature) -> bool { /// Returns an `Iterator` where /// `Item.0` is the feature name, and `Item.1` is a `bool` which /// is `true` if the feature is supported by the host and `false` otherwise. -#[unstable(feature = "stdsimd", issue = "27731")] +#[unstable(feature = "stdarch_internal", issue = "none")] pub fn features() -> impl Iterator { cfg_if! { if #[cfg(any( diff --git a/crates/std_detect/src/lib.rs b/crates/std_detect/src/lib.rs index 7fdfb872ec..5862ada21d 100644 --- a/crates/std_detect/src/lib.rs +++ b/crates/std_detect/src/lib.rs @@ -12,14 +12,18 @@ //! * `powerpc`: [`is_powerpc_feature_detected`] //! * `powerpc64`: [`is_powerpc64_feature_detected`] -#![unstable(feature = "stdsimd", issue = "27731")] -#![feature(staged_api, stdsimd, doc_cfg, allow_internal_unstable)] +#![stable(feature = "stdsimd", since = "1.27.0")] +#![feature(staged_api, doc_cfg, allow_internal_unstable)] #![deny(rust_2018_idioms)] #![allow(clippy::shadow_reuse)] #![deny(clippy::missing_inline_in_public_items)] #![cfg_attr(test, allow(unused_imports))] #![no_std] #![allow(internal_features)] +// Temporary hack: needed to build against toolchains from before the mass feature renaming. +// Remove this as soon as the stdarch submodule is updated on nightly. 
+#![allow(stable_features)] +#![feature(stdsimd)] #[cfg(test)] #[macro_use] @@ -31,5 +35,5 @@ extern crate std; extern crate alloc; #[doc(hidden)] -#[unstable(feature = "stdsimd", issue = "27731")] +#[stable(feature = "stdsimd", since = "1.27.0")] pub mod detect; diff --git a/crates/std_detect/tests/cpu-detection.rs b/crates/std_detect/tests/cpu-detection.rs index f93212d24f..8d551fc2cb 100644 --- a/crates/std_detect/tests/cpu-detection.rs +++ b/crates/std_detect/tests/cpu-detection.rs @@ -1,15 +1,20 @@ -#![feature(stdsimd)] +#![feature(stdarch_internal)] +#![cfg_attr(target_arch = "arm", feature(stdarch_arm_feature_detection))] +#![cfg_attr(target_arch = "powerpc", feature(stdarch_powerpc_feature_detection))] +#![cfg_attr(target_arch = "powerpc64", feature(stdarch_powerpc_feature_detection))] #![allow(clippy::unwrap_used, clippy::use_debug, clippy::print_stdout)] -#![cfg(any( - target_arch = "arm", - target_arch = "aarch64", - target_arch = "x86", - target_arch = "x86_64", - target_arch = "powerpc", - target_arch = "powerpc64" -))] -#[macro_use] +#[cfg_attr( + any( + target_arch = "arm", + target_arch = "aarch64", + target_arch = "x86", + target_arch = "x86_64", + target_arch = "powerpc", + target_arch = "powerpc64" + ), + macro_use +)] extern crate std_detect; #[test] diff --git a/crates/std_detect/tests/macro_trailing_commas.rs b/crates/std_detect/tests/macro_trailing_commas.rs index cd597af73c..8304b225f5 100644 --- a/crates/std_detect/tests/macro_trailing_commas.rs +++ b/crates/std_detect/tests/macro_trailing_commas.rs @@ -1,4 +1,6 @@ -#![feature(stdsimd)] +#![cfg_attr(target_arch = "arm", feature(stdarch_arm_feature_detection))] +#![cfg_attr(target_arch = "powerpc", feature(stdarch_powerpc_feature_detection))] +#![cfg_attr(target_arch = "powerpc64", feature(stdarch_powerpc_feature_detection))] #![allow(clippy::unwrap_used, clippy::use_debug, clippy::print_stdout)] #[cfg(any( diff --git a/crates/std_detect/tests/x86-specific.rs 
b/crates/std_detect/tests/x86-specific.rs index 38512c758d..54bcab7b1e 100644 --- a/crates/std_detect/tests/x86-specific.rs +++ b/crates/std_detect/tests/x86-specific.rs @@ -1,4 +1,3 @@ -#![feature(stdsimd)] #![cfg(any(target_arch = "x86", target_arch = "x86_64"))] extern crate cupid; diff --git a/crates/stdarch-gen/src/main.rs b/crates/stdarch-gen/src/main.rs index 8e2bea0e23..ca003d2bfb 100644 --- a/crates/stdarch-gen/src/main.rs +++ b/crates/stdarch-gen/src/main.rs @@ -516,6 +516,26 @@ impl TargetFeature { } } + /// A stability attribute for the intrinsic. + fn stability(&self, aarch64: bool) -> &str { + // 32-bit intrinsics are all unstable for now + if !aarch64 { + return "unstable(feature = \"stdarch_arm_neon_intrinsics\", issue = \"111800\")"; + } + match *self { + Default | ArmV7 | Vfp4 | FPArmV8 | AES => { + "stable(feature = \"neon_intrinsics\", since = \"1.59.0\")" + } + FCMA => "unstable(feature = \"stdarch_neon_fcma\", issue = \"117222\")", + Dotprod => "unstable(feature = \"stdarch_neon_dotprod\", issue = \"117224\")", + I8MM => "unstable(feature = \"stdarch_neon_i8mm\", issue = \"117223\")", + SHA3 => "unstable(feature = \"stdarch_neon_sha3\", issue = \"117225\")", + RDM => "stable(feature = \"rdm_intrinsics\", since = \"1.62.0\")", + SM4 => "unstable(feature = \"stdarch_neon_sm4\", issue = \"117226\")", + FTTS => "unstable(feature = \"stdarch_neon_ftts\", issue = \"117227\")", + } + } + /// A string for use with #[simd_test(...)] (or `is_arm_feature_detected!(...)`). fn as_simd_test_arg_arm(&self) -> &str { // TODO: Ideally, these would match the target_feature strings (as for AArch64). 
@@ -1600,18 +1620,13 @@ fn gen_aarch64( } } }; - let stable = match target { - Default | ArmV7 | Vfp4 | FPArmV8 | AES => { - String::from("\n#[stable(feature = \"neon_intrinsics\", since = \"1.59.0\")]") - } - RDM => String::from("\n#[stable(feature = \"rdm_intrinsics\", since = \"1.62.0\")]"), - _ => String::new(), - }; + let stable = target.stability(true); let function = format!( r#" {function_doc} #[inline]{target_feature} -#[cfg_attr(test, assert_instr({current_aarch64}{const_assert}))]{const_legacy}{stable} +#[cfg_attr(test, assert_instr({current_aarch64}{const_assert}))]{const_legacy} +#[{stable}] {fn_decl}{{ {call_params} }} @@ -2510,13 +2525,8 @@ fn gen_arm( fn_decl, multi_calls, ext_c_aarch64, aarch64_params ) }; - let stable_aarch64 = match target { - Default | ArmV7 | Vfp4 | FPArmV8 | AES => { - String::from("\n#[stable(feature = \"neon_intrinsics\", since = \"1.59.0\")]") - } - RDM => String::from("\n#[stable(feature = \"rdm_intrinsics\", since = \"1.62.0\")]"), - _ => String::new(), - }; + let stable_aarch64 = target.stability(true); + let stable_arm = target.stability(false); let function_doc = create_doc_string(current_comment, &name); format!( r#" @@ -2524,12 +2534,14 @@ fn gen_arm( #[inline] #[cfg(target_arch = "arm")]{target_feature_arm} #[cfg_attr(test, assert_instr({assert_arm}{const_assert}))]{const_legacy} +#[{stable_arm}] {call_arm} {function_doc} #[inline] #[cfg(not(target_arch = "arm"))]{target_feature_aarch64} -#[cfg_attr(test, assert_instr({assert_aarch64}{const_assert}))]{const_legacy}{stable_aarch64} +#[cfg_attr(test, assert_instr({assert_aarch64}{const_assert}))]{const_legacy} +#[{stable_aarch64}] {call_aarch64} "#, target_feature_arm = target.to_target_feature_attr_arm(), @@ -2570,17 +2582,16 @@ fn gen_arm( String::new() } }; - let stable_aarch64 = match target { - Default | ArmV7 | Vfp4 | FPArmV8 | AES => String::from("\n#[cfg_attr(not(target_arch = \"arm\"), stable(feature = \"neon_intrinsics\", since = \"1.59.0\"))]"), - RDM => 
String::from("\n#[cfg_attr(not(target_arch = \"arm\"), stable(feature = \"rdm_intrinsics\", since = \"1.62.0\"))]"), - _ => String::new(), - }; + let stable_aarch64 = target.stability(true); + let stable_arm = target.stability(false); format!( r#" {function_doc} #[inline]{target_feature} #[cfg_attr(all(test, target_arch = "arm"), assert_instr({assert_arm}{const_assert}))] -#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr({assert_aarch64}{const_assert}))]{const_legacy}{stable_aarch64} +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr({assert_aarch64}{const_assert}))]{const_legacy} +#[cfg_attr(not(target_arch = "arm"), {stable_aarch64})] +#[cfg_attr(target_arch = "arm", {stable_arm})] {call} "#, function_doc = create_doc_string(current_comment, &name), diff --git a/crates/stdarch-verify/tests/arm.rs b/crates/stdarch-verify/tests/arm.rs index 84131c0fbe..d6f8e69acb 100644 --- a/crates/stdarch-verify/tests/arm.rs +++ b/crates/stdarch-verify/tests/arm.rs @@ -1,8 +1,9 @@ +#![allow(unused)] + use std::collections::HashMap; use serde::Deserialize; -#[allow(unused)] struct Function { name: &'static str, arguments: &'static [&'static Type], diff --git a/examples/connect5.rs b/examples/connect5.rs index 805108c24c..ffbff5e486 100644 --- a/examples/connect5.rs +++ b/examples/connect5.rs @@ -28,7 +28,9 @@ //! You should see a game self-playing. In the end of the game, it shows the average time for //! each move. -#![feature(stdsimd, avx512_target_feature)] +#![feature(avx512_target_feature)] +#![cfg_attr(target_arch = "x86", feature(stdarch_x86_avx512))] +#![cfg_attr(target_arch = "x86_64", feature(stdarch_x86_avx512))] #![feature(stmt_expr_attributes)] use rand::seq::SliceRandom; diff --git a/examples/hex.rs b/examples/hex.rs index a961793a04..490556e8bf 100644 --- a/examples/hex.rs +++ b/examples/hex.rs @@ -12,7 +12,7 @@ //! //! and you should see `746573740a` get printed out. 
-#![feature(stdsimd, wasm_target_feature)] +#![feature(wasm_target_feature)] #![cfg_attr(test, feature(test))] #![allow( clippy::unwrap_used, diff --git a/examples/wasm.rs b/examples/wasm.rs index 6b92ae9b87..8a95ed54e1 100644 --- a/examples/wasm.rs +++ b/examples/wasm.rs @@ -1,6 +1,5 @@ //! A simple slab allocator for pages in wasm -#![feature(stdsimd)] #![cfg(target_arch = "wasm32")] use std::ptr;