Skip to main content

core/stdarch/crates/core_arch/src/aarch64/neon/
generated.rs

1// This code is automatically generated. DO NOT MODIFY.
2//
3// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file:
4//
5// ```
6// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec
7// ```
8#![allow(improper_ctypes)]
9
10#[cfg(test)]
11use stdarch_test::assert_instr;
12
13use super::*;
14
#[doc = "CRC32-C single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"]
#[inline(always)]
#[target_feature(enable = "crc")]
#[cfg_attr(test, assert_instr(crc32cx))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32cd(crc: u32, data: u64) -> u32 {
    // Declaration bound to the AArch64 LLVM intrinsic via `link_name`;
    // it lowers to a single CRC32CX instruction (see `assert_instr` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32cx"
        )]
        fn ___crc32cd(crc: u32, data: u64) -> u32;
    }
    // SAFETY: pure register-to-register computation, no memory access; the
    // `crc` target feature is required by the attribute on this function.
    unsafe { ___crc32cd(crc, data) }
}
#[doc = "CRC32 single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"]
#[inline(always)]
#[target_feature(enable = "crc")]
#[cfg_attr(test, assert_instr(crc32x))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32d(crc: u32, data: u64) -> u32 {
    // Declaration bound to the AArch64 LLVM intrinsic via `link_name`;
    // lowers to a single CRC32X instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32x"
        )]
        fn ___crc32d(crc: u32, data: u64) -> u32;
    }
    // SAFETY: pure register computation, no memory access; the `crc` target
    // feature is guaranteed by the attribute on this function.
    unsafe { ___crc32d(crc, data) }
}
#[doc = "Floating-point JavaScript convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__jcvt)"]
#[inline(always)]
#[target_feature(enable = "jsconv")]
#[cfg_attr(test, assert_instr(fjcvtzs))]
#[stable(feature = "stdarch_aarch64_jscvt", since = "1.95.0")]
pub fn __jcvt(a: f64) -> i32 {
    // Declaration bound to the AArch64 LLVM intrinsic via `link_name`;
    // lowers to FJCVTZS (JavaScript-semantics f64 -> i32 conversion).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.fjcvtzs"
        )]
        fn ___jcvt(a: f64) -> i32;
    }
    // SAFETY: pure register computation; the `jsconv` target feature is
    // required by the attribute on this function.
    unsafe { ___jcvt(a) }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    // SAFETY: only portable-SIMD intrinsics on value types; no memory access.
    unsafe {
        // Take the high halves (lanes 8..15) of the two 128-bit inputs.
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int8x8_t = vabd_s8(d, e);
        // Reinterpret the absolute difference as unsigned so the widening
        // cast zero-extends (the difference is a magnitude, not signed).
        let f: uint8x8_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // SAFETY: only portable-SIMD intrinsics on value types; no memory access.
    unsafe {
        // High halves (lanes 4..7) of the inputs.
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        let f: int16x4_t = vabd_s16(d, e);
        // Unsigned reinterpretation => zero-extending widen before the add.
        let f: uint16x4_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // SAFETY: only portable-SIMD intrinsics on value types; no memory access.
    unsafe {
        // High halves (lanes 2..3) of the inputs.
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        let f: int32x2_t = vabd_s32(d, e);
        // Unsigned reinterpretation => zero-extending widen before the add.
        let f: uint32x2_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    // SAFETY: only portable-SIMD intrinsics on value types; no memory access.
    unsafe {
        // High halves (lanes 8..15); the unsigned difference widens
        // directly via `simd_cast` (zero extension) before accumulating.
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: uint8x8_t = vabd_u8(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    // SAFETY: only portable-SIMD intrinsics on value types; no memory access.
    unsafe {
        // High halves (lanes 4..7) of the inputs.
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        let f: uint16x4_t = vabd_u16(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    // SAFETY: only portable-SIMD intrinsics on value types; no memory access.
    unsafe {
        // High halves (lanes 2..3) of the inputs.
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        let f: uint32x2_t = vabd_u32(d, e);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Absolute difference between the arguments of Floating"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Declaration bound to the AArch64 LLVM intrinsic (FABD, 1 x f64 lane).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v1f64"
        )]
        fn _vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: pure register computation; `neon` is required by the attribute
    // on this function.
    unsafe { _vabd_f64(a, b) }
}
#[doc = "Absolute difference between the arguments of Floating"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Declaration bound to the AArch64 LLVM intrinsic (FABD, 2 x f64 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v2f64"
        )]
        fn _vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: pure register computation; `neon` is required by the attribute
    // on this function.
    unsafe { _vabdq_f64(a, b) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdd_f64(a: f64, b: f64) -> f64 {
    // Scalar form: broadcast both scalars into 1-lane vectors, run the
    // vector FABD, and extract lane 0.
    // SAFETY: value-only SIMD operations; no memory access.
    unsafe { simd_extract!(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabds_f32(a: f32, b: f32) -> f32 {
    // Scalar form via the 2-lane f32 vector variant; result read from lane 0.
    // SAFETY: value-only SIMD operations; no memory access.
    unsafe { simd_extract!(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision form; requires the `fp16` feature and is not
    // provided on arm64ec (see the `cfg` above).
    // SAFETY: value-only SIMD operations; no memory access.
    unsafe { simd_extract!(vabd_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // SAFETY: only portable-SIMD intrinsics on value types; no memory access.
    unsafe {
        // High halves (lanes 4..7); the difference is reinterpreted as
        // unsigned so the final widening cast zero-extends the magnitude.
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: uint16x4_t = simd_cast(vabd_s16(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // SAFETY: only portable-SIMD intrinsics on value types; no memory access.
    unsafe {
        // High halves (lanes 2..3); zero-extend the unsigned difference.
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: uint32x2_t = simd_cast(vabd_s32(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    // SAFETY: only portable-SIMD intrinsics on value types; no memory access.
    unsafe {
        // High halves (lanes 8..15); zero-extend the unsigned difference.
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: uint8x8_t = simd_cast(vabd_s8(c, d));
        simd_cast(e)
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    // SAFETY: only portable-SIMD intrinsics on value types; no memory access.
    unsafe {
        // High halves (lanes 8..15); widen the unsigned difference.
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_cast(vabd_u8(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    // SAFETY: only portable-SIMD intrinsics on value types; no memory access.
    unsafe {
        // High halves (lanes 4..7); widen the unsigned difference.
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_cast(vabd_u16(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    // SAFETY: only portable-SIMD intrinsics on value types; no memory access.
    unsafe {
        // High halves (lanes 2..3); widen the unsigned difference.
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_cast(vabd_u32(c, d))
    }
}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabs_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: lane-wise operation on a value type; no memory access.
    unsafe { simd_fabs(a) }
}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabsq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: lane-wise operation on a value type; no memory access.
    unsafe { simd_fabs(a) }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabs_s64(a: int64x1_t) -> int64x1_t {
    // SAFETY: value-only SIMD operations; no memory access.
    unsafe {
        // Per lane: select max(a, -a). `simd_neg` wraps, so i64::MIN stays
        // i64::MIN — hence "wrapping" in the doc above.
        let neg: int64x1_t = simd_neg(a);
        let mask: int64x1_t = simd_ge(a, neg);
        simd_select(mask, a, neg)
    }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsq_s64(a: int64x2_t) -> int64x2_t {
    // SAFETY: value-only SIMD operations; no memory access.
    unsafe {
        // Same select-of-negation pattern as `vabs_s64`, over two lanes.
        let neg: int64x2_t = simd_neg(a);
        let mask: int64x2_t = simd_ge(a, neg);
        simd_select(mask, a, neg)
    }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsd_s64(a: i64) -> i64 {
    // Declaration bound to the scalar AArch64 LLVM intrinsic via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.abs.i64"
        )]
        fn _vabsd_s64(a: i64) -> i64;
    }
    // SAFETY: pure register computation; `neon` is required by the attribute
    // on this function.
    unsafe { _vabsd_s64(a) }
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_s64(a: i64, b: i64) -> i64 {
    // Plain wrapping scalar add; no SIMD instruction needed (assert_instr
    // expects `nop`, i.e. no dedicated instruction for this intrinsic).
    a.wrapping_add(b)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_u64(a: u64, b: u64) -> u64 {
    // Plain wrapping scalar add; see `vaddd_s64`-style note above.
    a.wrapping_add(b)
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s16(a: int16x4_t) -> i32 {
    // Declaration bound to the AArch64 LLVM intrinsic via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v4i16"
        )]
        fn _vaddlv_s16(a: int16x4_t) -> i32;
    }
    // SAFETY: pure register computation; `neon` is required by the attribute
    // on this function. (Same applies to every call below.)
    unsafe { _vaddlv_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s16(a: int16x8_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i16"
        )]
        fn _vaddlvq_s16(a: int16x8_t) -> i32;
    }
    // SAFETY: pure register computation, feature guaranteed by attribute.
    unsafe { _vaddlvq_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s32(a: int32x4_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v4i32"
        )]
        fn _vaddlvq_s32(a: int32x4_t) -> i64;
    }
    // SAFETY: pure register computation, feature guaranteed by attribute.
    unsafe { _vaddlvq_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlp))]
pub fn vaddlv_s32(a: int32x2_t) -> i64 {
    // NOTE: the two-lane variant asserts SADDLP (pairwise) rather than
    // SADDLV — with only two lanes a single pairwise add is the lowering.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v2i32"
        )]
        fn _vaddlv_s32(a: int32x2_t) -> i64;
    }
    // SAFETY: pure register computation, feature guaranteed by attribute.
    unsafe { _vaddlv_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s8(a: int8x8_t) -> i16 {
    // The LLVM intrinsic yields i32; the sum of 8 sign-extended i8 values
    // always fits in i16, so the truncation below is lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i8"
        )]
        fn _vaddlv_s8(a: int8x8_t) -> i32;
    }
    // SAFETY: pure register computation, feature guaranteed by attribute.
    unsafe { _vaddlv_s8(a) as i16 }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s8(a: int8x16_t) -> i16 {
    // As above: i32 intrinsic result truncated to i16 (sum of 16 i8 fits).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v16i8"
        )]
        fn _vaddlvq_s8(a: int8x16_t) -> i32;
    }
    // SAFETY: pure register computation, feature guaranteed by attribute.
    unsafe { _vaddlvq_s8(a) as i16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u16(a: uint16x4_t) -> u32 {
    // Declaration bound to the AArch64 LLVM intrinsic via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16"
        )]
        fn _vaddlv_u16(a: uint16x4_t) -> u32;
    }
    // SAFETY: pure register computation; `neon` is required by the attribute
    // on this function. (Same applies to every call below.)
    unsafe { _vaddlv_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u16(a: uint16x8_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16"
        )]
        fn _vaddlvq_u16(a: uint16x8_t) -> u32;
    }
    // SAFETY: pure register computation, feature guaranteed by attribute.
    unsafe { _vaddlvq_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u32(a: uint32x4_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32"
        )]
        fn _vaddlvq_u32(a: uint32x4_t) -> u64;
    }
    // SAFETY: pure register computation, feature guaranteed by attribute.
    unsafe { _vaddlvq_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlp))]
pub fn vaddlv_u32(a: uint32x2_t) -> u64 {
    // NOTE: the two-lane variant asserts UADDLP (pairwise) rather than
    // UADDLV — with only two lanes a single pairwise add is the lowering.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32"
        )]
        fn _vaddlv_u32(a: uint32x2_t) -> u64;
    }
    // SAFETY: pure register computation, feature guaranteed by attribute.
    unsafe { _vaddlv_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u8(a: uint8x8_t) -> u16 {
    // The LLVM intrinsic yields i32; the sum of 8 zero-extended u8 values
    // always fits in u16, so the cast below is lossless.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8"
        )]
        fn _vaddlv_u8(a: uint8x8_t) -> i32;
    }
    // SAFETY: pure register computation, feature guaranteed by attribute.
    unsafe { _vaddlv_u8(a) as u16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u8(a: uint8x16_t) -> u16 {
    // As above: i32 intrinsic result cast to u16 (sum of 16 u8 fits).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8"
        )]
        fn _vaddlvq_u8(a: uint8x16_t) -> i32;
    }
    // SAFETY: pure register computation, feature guaranteed by attribute.
    unsafe { _vaddlvq_u8(a) as u16 }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddv_f32(a: float32x2_t) -> f32 {
    // Declaration bound to the AArch64 LLVM intrinsic via `link_name`;
    // lowered via FADDP (pairwise add).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v2f32"
        )]
        fn _vaddv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: pure register computation; `neon` is required by the attribute
    // on this function.
    unsafe { _vaddv_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v4f32"
        )]
        fn _vaddvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: pure register computation, feature guaranteed by attribute.
    unsafe { _vaddvq_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f64.v2f64"
        )]
        fn _vaddvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: pure register computation, feature guaranteed by attribute.
    unsafe { _vaddvq_f64(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_s32(a: int32x2_t) -> i32 {
    // Ordered lane-sum starting from 0; integer addition, so ordering has
    // no observable effect on the result.
    // SAFETY: value-only reduction; no memory access. (Same for each
    // `simd_reduce_add_ordered` call in this family.)
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s8(a: int8x8_t) -> i8 {
    // SAFETY: value-only reduction; no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s8(a: int8x16_t) -> i8 {
    // SAFETY: value-only reduction; no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s16(a: int16x4_t) -> i16 {
    // SAFETY: value-only reduction; no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s16(a: int16x8_t) -> i16 {
    // SAFETY: value-only reduction; no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s32(a: int32x4_t) -> i32 {
    // SAFETY: value-only reduction; no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_u32(a: uint32x2_t) -> u32 {
    // SAFETY: value-only reduction; no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u8(a: uint8x8_t) -> u8 {
    // SAFETY: value-only reduction; no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
    // SAFETY: value-only reduction; no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u16(a: uint16x4_t) -> u16 {
    // SAFETY: value-only reduction; no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
    // SAFETY: value-only reduction; no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
    // SAFETY: value-only reduction; no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_s64(a: int64x2_t) -> i64 {
    // Two 64-bit lanes: asserts ADDP (pairwise add) rather than ADDV.
    // SAFETY: value-only reduction; no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_u64(a: uint64x2_t) -> u64 {
    // Two 64-bit lanes: asserts ADDP (pairwise add) rather than ADDV.
    // SAFETY: value-only reduction; no memory access.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // FFI binding to the LLVM FAMAX builtin; `link_name` must match LLVM exactly.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v4f16"
        )]
        fn _vamax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: `neon` and `faminmax` are enabled for this function via #[target_feature].
    unsafe { _vamax_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v8f16"
        )]
        fn _vamaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: `neon` and `faminmax` are enabled for this function via #[target_feature].
    unsafe { _vamaxq_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f32"
        )]
        fn _vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: `neon` and `faminmax` are enabled for this function via #[target_feature].
    unsafe { _vamax_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v4f32"
        )]
        fn _vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: `neon` and `faminmax` are enabled for this function via #[target_feature].
    unsafe { _vamaxq_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f64"
        )]
        fn _vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: `neon` and `faminmax` are enabled for this function via #[target_feature].
    unsafe { _vamaxq_f64(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // FFI binding to the LLVM FAMIN builtin; `link_name` must match LLVM exactly.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v4f16"
        )]
        fn _vamin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: `neon` and `faminmax` are enabled for this function via #[target_feature].
    unsafe { _vamin_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v8f16"
        )]
        fn _vaminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: `neon` and `faminmax` are enabled for this function via #[target_feature].
    unsafe { _vaminq_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f32"
        )]
        fn _vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: `neon` and `faminmax` are enabled for this function via #[target_feature].
    unsafe { _vamin_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v4f32"
        )]
        fn _vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: `neon` and `faminmax` are enabled for this function via #[target_feature].
    unsafe { _vaminq_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f64"
        )]
        fn _vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: `neon` and `faminmax` are enabled for this function via #[target_feature].
    unsafe { _vaminq_f64(a, b) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    // FFI binding to the LLVM BCAX builtin (signed variant, `bcaxs`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v16i8"
        )]
        fn _vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    // SAFETY: `neon` and `sha3` are enabled for this function via #[target_feature].
    unsafe { _vbcaxq_s8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v8i16"
        )]
        fn _vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: `neon` and `sha3` are enabled for this function via #[target_feature].
    unsafe { _vbcaxq_s16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v4i32"
        )]
        fn _vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: `neon` and `sha3` are enabled for this function via #[target_feature].
    unsafe { _vbcaxq_s32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v2i64"
        )]
        fn _vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    // SAFETY: `neon` and `sha3` are enabled for this function via #[target_feature].
    unsafe { _vbcaxq_s64(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // FFI binding to the LLVM BCAX builtin (unsigned variant, `bcaxu`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v16i8"
        )]
        fn _vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: `neon` and `sha3` are enabled for this function via #[target_feature].
    unsafe { _vbcaxq_u8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v8i16"
        )]
        fn _vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: `neon` and `sha3` are enabled for this function via #[target_feature].
    unsafe { _vbcaxq_u16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v4i32"
        )]
        fn _vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: `neon` and `sha3` are enabled for this function via #[target_feature].
    unsafe { _vbcaxq_u32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v2i64"
        )]
        fn _vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: `neon` and `sha3` are enabled for this function via #[target_feature].
    unsafe { _vbcaxq_u64(a, b, c) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // FFI binding to the LLVM FCADD builtin with a fixed 270-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f16"
        )]
        fn _vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: `neon`, `fp16`, and `fcma` are enabled for this function via #[target_feature].
    unsafe { _vcadd_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v8f16"
        )]
        fn _vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: `neon`, `fp16`, and `fcma` are enabled for this function via #[target_feature].
    unsafe { _vcaddq_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32"
        )]
        fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: `neon` and `fcma` are enabled for this function via #[target_feature].
    unsafe { _vcadd_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32"
        )]
        fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: `neon` and `fcma` are enabled for this function via #[target_feature].
    unsafe { _vcaddq_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64"
        )]
        fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: `neon` and `fcma` are enabled for this function via #[target_feature].
    unsafe { _vcaddq_rot270_f64(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // FFI binding to the LLVM FCADD builtin with a fixed 90-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f16"
        )]
        fn _vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: `neon`, `fp16`, and `fcma` are enabled for this function via #[target_feature].
    unsafe { _vcadd_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v8f16"
        )]
        fn _vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: `neon`, `fp16`, and `fcma` are enabled for this function via #[target_feature].
    unsafe { _vcaddq_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32"
        )]
        fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: `neon` and `fcma` are enabled for this function via #[target_feature].
    unsafe { _vcadd_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32"
        )]
        fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: `neon` and `fcma` are enabled for this function via #[target_feature].
    unsafe { _vcaddq_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64"
        )]
        fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: `neon` and `fcma` are enabled for this function via #[target_feature].
    unsafe { _vcaddq_rot90_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // FFI binding to the LLVM FACGE builtin; result is a per-lane mask vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v1i64.v1f64"
        )]
        fn _vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: `neon` is enabled for this function via #[target_feature].
    unsafe { _vcage_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v2i64.v2f64"
        )]
        fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: `neon` is enabled for this function via #[target_feature].
    unsafe { _vcageq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaged_f64(a: f64, b: f64) -> u64 {
    // Scalar (doubleword) form of the absolute >= compare.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i64.f64"
        )]
        fn _vcaged_f64(a: f64, b: f64) -> u64;
    }
    // SAFETY: `neon` is enabled for this function via #[target_feature].
    unsafe { _vcaged_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcages_f32(a: f32, b: f32) -> u32 {
    // Scalar (single-precision) form of the absolute >= compare.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f32"
        )]
        fn _vcages_f32(a: f32, b: f32) -> u32;
    }
    // SAFETY: `neon` is enabled for this function via #[target_feature].
    unsafe { _vcages_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcageh_f16(a: f16, b: f16) -> u16 {
    // The LLVM builtin returns its result widened to i32 (see signature
    // below); the cast truncates it to the 16-bit result type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f16"
        )]
        fn _vcageh_f16(a: f16, b: f16) -> i32;
    }
    // SAFETY: `neon` and `fp16` are enabled for this function via #[target_feature].
    unsafe { _vcageh_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // FFI binding to the LLVM FACGT builtin; result is a per-lane mask vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64"
        )]
        fn _vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: `neon` is enabled for this function via #[target_feature].
    unsafe { _vcagt_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64"
        )]
        fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: `neon` is enabled for this function via #[target_feature].
    unsafe { _vcagtq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtd_f64(a: f64, b: f64) -> u64 {
    // Scalar (doubleword) form of the absolute > compare.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i64.f64"
        )]
        fn _vcagtd_f64(a: f64, b: f64) -> u64;
    }
    // SAFETY: `neon` is enabled for this function via #[target_feature].
    unsafe { _vcagtd_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagts_f32(a: f32, b: f32) -> u32 {
    // Scalar (single-precision) form of the absolute > compare.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f32"
        )]
        fn _vcagts_f32(a: f32, b: f32) -> u32;
    }
    // SAFETY: `neon` is enabled for this function via #[target_feature].
    unsafe { _vcagts_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcagth_f16(a: f16, b: f16) -> u16 {
    // The LLVM builtin returns its result widened to i32 (see signature
    // below); the cast truncates it to the 16-bit result type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f16"
        )]
        fn _vcagth_f16(a: f16, b: f16) -> i32;
    }
    // SAFETY: `neon` and `fp16` are enabled for this function via #[target_feature].
    unsafe { _vcagth_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| <= |b| is |b| >= |a|: delegate to the FACGE-based vcage_f64 with
    // the operands swapped (hence assert_instr(facge), not a facle).
    vcage_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Operand-swapped vcageq_f64; see vcale_f64 for rationale.
    vcageq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaled_f64(a: f64, b: f64) -> u64 {
    // Operand-swapped scalar vcaged_f64.
    vcaged_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcales_f32(a: f32, b: f32) -> u32 {
    // Operand-swapped scalar vcages_f32.
    vcages_f32(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcaleh_f16(a: f16, b: f16) -> u16 {
    // Operand-swapped scalar vcageh_f16.
    vcageh_f16(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| < |b| is |b| > |a|: delegate to the FACGT-based vcagt_f64 with
    // the operands swapped (hence assert_instr(facgt), not a faclt).
    vcagt_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Operand-swapped vcagtq_f64; see vcalt_f64 for rationale.
    vcagtq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaltd_f64(a: f64, b: f64) -> u64 {
    // Operand-swapped scalar vcagtd_f64.
    vcagtd_f64(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalts_f32(a: f32, b: f32) -> u32 {
    // Operand-swapped scalar vcagts_f32.
    vcagts_f32(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcalth_f16(a: f16, b: f16) -> u16 {
    // Operand-swapped scalar vcagth_f16.
    vcagth_f16(b, a)
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Lane-wise equality: each result lane is all ones where a == b and all
    // zeros otherwise (simd_eq mask semantics). Lowers to FCMEQ.
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // 128-bit (2-lane) variant of the lane-wise FCMEQ comparison above.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // Lane-wise integer equality; result lanes are all-ones/all-zeros masks.
    // Lowers to CMEQ.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // 128-bit (2-lane) variant of the lane-wise CMEQ comparison.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Unsigned lane-wise equality; identical CMEQ lowering (equality is
    // sign-agnostic).
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // 128-bit (2-lane) unsigned variant of the lane-wise CMEQ comparison.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    // Polynomial lanes are compared purely bitwise; same CMEQ lowering.
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    // 128-bit (2-lane) polynomial variant of the bitwise CMEQ comparison.
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_f64(a: f64, b: f64) -> u64 {
    // Scalar comparison built from the vector primitive: broadcast both
    // scalars into 1-lane vectors, compare, then pull lane 0 back out.
    // Returns all-ones (u64::MAX) when equal, 0 otherwise.
    unsafe { simd_extract!(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqs_f32(a: f32, b: f32) -> u32 {
    // Scalar f32 variant: broadcast, vector compare, extract lane 0.
    unsafe { simd_extract!(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_s64(a: i64, b: i64) -> u64 {
    // i64 and int64x1_t have identical size/layout, so each scalar is
    // reinterpreted as a 1-lane vector, compared, and the resulting mask
    // reinterpreted back to u64 (all ones if equal, 0 otherwise).
    unsafe { transmute(vceq_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_u64(a: u64, b: u64) -> u64 {
    // Unsigned twin of vceqd_s64: transmute scalars to 1-lane vectors and back.
    unsafe { transmute(vceq_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqh_f16(a: f16, b: f16) -> u16 {
    // Scalar f16 variant (requires fp16): broadcast, vector compare, extract
    // lane 0.
    unsafe { simd_extract!(vceq_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t {
    // Build an all-zero vector in the portable f16x4 representation, then
    // reinterpret it as the NEON type for the lane-wise compare (FCMEQ #0.0).
    let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqzq_f16(a: float16x8_t) -> uint16x8_t {
    // 128-bit (8-lane) variant: compare every lane against an f16 zero vector.
    let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
    // Lane-wise a == 0.0 compare; result lanes are all-ones/all-zeros masks.
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
    // 128-bit (4-lane) variant of the a == 0.0 lane-wise compare.
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane variant: a plain f64 zero has the same size and layout as
    // float64x1_t, so a scalar transmute stands in for a 1-lane zero vector.
    let b: f64 = 0.0;
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
    // 128-bit (2-lane) variant of the a == 0.0 lane-wise compare.
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
    // Lane-wise a == 0 compare against a zero vector; each result lane is an
    // all-ones/all-zeros mask. Lowers to CMEQ (compare against zero form).
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
    // 128-bit (16-lane) variant of the a == 0 compare.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
    // 4-lane i16 variant of the a == 0 compare.
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
    // 128-bit (8-lane) i16 variant of the a == 0 compare.
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
    // 2-lane i32 variant of the a == 0 compare.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
    // 128-bit (4-lane) i32 variant of the a == 0 compare.
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
    // Single-lane i64 variant of the a == 0 compare.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
    // 128-bit (2-lane) i64 variant of the a == 0 compare.
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
// NOTE(review): the generated one-line summaries for the poly8/poly64 vceqz
// variants read "Signed compare bitwise equal to zero", but polynomial types
// are bit patterns, not signed integers. Corrected below; since this file is
// auto-generated, the authoritative fix belongs in `crates/stdarch-gen-arm/spec/`.
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
    // Polynomial lanes are compared purely bitwise against a zero vector;
    // the zero is built in the i8x8 representation and reinterpreted.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
    // 128-bit (16-lane) polynomial variant of the bitwise a == 0 compare.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
    // Single-lane poly64 variant of the bitwise a == 0 compare.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
    // 128-bit (2-lane) poly64 variant of the bitwise a == 0 compare.
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
    // Lane-wise a == 0 compare against a zero vector; each result lane is an
    // all-ones/all-zeros mask. Lowers to CMEQ (compare against zero form).
    let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
    // 128-bit (16-lane) variant of the a == 0 compare.
    let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
    // 4-lane u16 variant of the a == 0 compare.
    let b: u16x4 = u16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
    // 128-bit (8-lane) u16 variant of the a == 0 compare.
    let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
    // 2-lane u32 variant of the a == 0 compare.
    let b: u32x2 = u32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
    // 128-bit (4-lane) u32 variant of the a == 0 compare.
    let b: u32x4 = u32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
    // Single-lane u64 variant of the a == 0 compare.
    let b: u64x1 = u64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
    // 128-bit (2-lane) u64 variant of the a == 0 compare.
    let b: u64x2 = u64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_s64(a: i64) -> u64 {
    // i64 and int64x1_t share size/layout: reinterpret the scalar as a 1-lane
    // vector, do the vector compare-to-zero, and reinterpret the mask back.
    // Returns all-ones (u64::MAX) when a == 0, 0 otherwise.
    unsafe { transmute(vceqz_s64(transmute(a))) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_u64(a: u64) -> u64 {
    // Unsigned twin of vceqzd_s64: transmute round-trip through uint64x1_t.
    unsafe { transmute(vceqz_u64(transmute(a))) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqzh_f16(a: f16) -> u16 {
    // Scalar f16 variant (requires fp16): broadcast the scalar, run the
    // vector compare-to-zero, then extract lane 0 of the mask.
    unsafe { simd_extract!(vceqz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzs_f32(a: f32) -> u32 {
    // Scalar f32 variant: broadcast, vector compare-to-zero, extract lane 0.
    unsafe { simd_extract!(vceqz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_f64(a: f64) -> u64 {
    // Scalar f64 variant: broadcast, vector compare-to-zero, extract lane 0.
    unsafe { simd_extract!(vceqz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Lane-wise a >= b: each result lane is all ones where the comparison
    // holds and all zeros otherwise (simd_ge mask semantics). Lowers to FCMGE.
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // 128-bit (2-lane) variant of the lane-wise FCMGE comparison.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // Signed lane-wise a >= b. Lowers to CMGE.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // 128-bit (2-lane) signed variant of the lane-wise CMGE comparison.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Unsigned lane-wise a >= b; the unsigned form lowers to CMHS
    // (compare higher-or-same) rather than CMGE.
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // 128-bit (2-lane) unsigned variant; lowers to CMHS.
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_f64(a: f64, b: f64) -> u64 {
    // Scalar a >= b built from the vector primitive: broadcast both scalars
    // into 1-lane vectors, compare, then pull lane 0 of the mask back out.
    // Returns all-ones (u64::MAX) when true, 0 otherwise.
    unsafe { simd_extract!(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcges_f32(a: f32, b: f32) -> u32 {
    // Scalar f32 variant: broadcast, vector compare, extract lane 0.
    unsafe { simd_extract!(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_s64(a: i64, b: i64) -> u64 {
    // i64 and int64x1_t share size/layout: transmute the scalars to 1-lane
    // vectors, run the signed vector compare, transmute the mask back.
    unsafe { transmute(vcge_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_u64(a: u64, b: u64) -> u64 {
    // Unsigned twin of vcged_s64: transmute round-trip through uint64x1_t.
    unsafe { transmute(vcge_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgeh_f16(a: f16, b: f16) -> u16 {
    // Scalar f16 variant (requires fp16): broadcast, vector compare, extract
    // lane 0.
    unsafe { simd_extract!(vcge_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
    // Lane-wise a >= 0.0 against a zero vector built in the portable f32x2
    // representation and reinterpreted to the NEON type. Lowers to FCMGE #0.0.
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
    // 128-bit (4-lane) variant of the a >= 0.0 lane-wise compare.
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane variant: a plain f64 zero has the same size and layout as
    // float64x1_t, so a scalar transmute stands in for a 1-lane zero vector.
    let b: f64 = 0.0;
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
    // 128-bit (2-lane) variant of the a >= 0.0 lane-wise compare.
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
    // Signed lane-wise a >= 0 against a zero vector; each result lane is an
    // all-ones/all-zeros mask. Lowers to CMGE (compare against zero form).
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
    // 128-bit (16-lane) variant of the a >= 0 compare.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
    // 4-lane i16 variant of the a >= 0 compare.
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
    // 128-bit (8-lane) i16 variant of the a >= 0 compare.
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
    // 2-lane i32 variant of the a >= 0 compare.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
    // 128-bit (4-lane) i32 variant of the a >= 0 compare.
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
    // Single-lane i64 variant of the a >= 0 compare.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
    // 128-bit (2-lane) i64 variant of the a >= 0 compare.
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
// --- vcgez_* (scalar forms): compare a single scalar >= 0. Floating-point
// variants broadcast the scalar with vdup_n_* (one lane is enough), run the
// 1-/multi-lane vector compare, and extract lane 0 of the mask. The integer
// d_s64 variant round-trips through the 1-lane vector type with transmute. ---
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_f64(a: f64) -> u64 {
    // Broadcast, compare as a one-lane vector, take lane 0 of the mask.
    unsafe { simd_extract!(vcgez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vcgez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
// Codegen test expects no dedicated compare instruction for this lowering.
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_s64(a: i64) -> u64 {
    // i64 -> int64x1_t, vector compare, uint64x1_t -> u64.
    unsafe { transmute(vcgez_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgezh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vcgez_f16(vdup_n_f16(a)), 0) }
}
// --- vcgt_* (vector forms): lane-wise "a > b" as thin wrappers over
// `simd_gt`. The codegen tests pin FCMGT (float), CMGT (signed) and
// CMHI (unsigned) respectively. ---
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_gt(a, b) }
}
// --- vcgt_* (scalar forms): scalar "a > b". Floats broadcast both operands
// and extract lane 0; integers round-trip through the 1-lane vector types
// with transmute. ---
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgts_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_s64(a: i64, b: i64) -> u64 {
    // Scalars -> 1-lane vectors, compare, mask back to u64.
    unsafe { transmute(vcgt_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcgt_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgth_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcgt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
// --- vcgtz_* (vector forms): lane-wise "a > 0" (FCMGT/CMGT against zero).
// Same zero-RHS pattern as vcgez_*: build an all-zero portable SIMD value,
// transmute to the NEON type, and compare with `simd_gt`. ---
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
    // Single f64 scalar transmuted directly to the 1-lane float64x1_t zero.
    let b: f64 = 0.0;
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_gt(a, transmute(b)) }
}
// --- vcgtz_* (scalar forms): scalar "a > 0". Same broadcast/extract and
// transmute patterns as the scalar vcgez_* group above. ---
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vcgtz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vcgtz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_s64(a: i64) -> u64 {
    unsafe { transmute(vcgtz_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgtzh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vcgtz_f16(vdup_n_f16(a)), 0) }
}
// --- vcle_* (vector forms): lane-wise "a <= b" wrappers over `simd_le`.
// Codegen asserts the reversed-operand instructions FCMGE/CMGE/CMHS, since
// AArch64 encodes a<=b as b>=a. ---
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_le(a, b) }
}
// --- vcle_* (scalar forms): scalar "a <= b" via broadcast + lane-0 extract
// (floats) or 1-lane transmute round-trip (integers). ---
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcles_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcle_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcle_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcleh_f16(a: f16, b: f16) -> u16 {
    unsafe { simd_extract!(vcle_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
// --- vclez_* (vector forms): lane-wise "a <= 0" (FCMLE/CMLE against zero).
// Same zero-RHS transmute pattern as vcgez_*/vcgtz_* above, with `simd_le`. ---
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f64(a: float64x1_t) -> uint64x1_t {
    // Single f64 scalar transmuted directly to the 1-lane float64x1_t zero.
    let b: f64 = 0.0;
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_le(a, transmute(b)) }
}
// --- vclez_* (scalar forms): scalar "a <= 0", same broadcast/extract and
// transmute patterns as the other scalar groups. ---
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vclez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vclez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_s64(a: i64) -> u64 {
    unsafe { transmute(vclez_s64(transmute(a))) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vclezh_f16(a: f16) -> u16 {
    unsafe { simd_extract!(vclez_f16(vdup_n_f16(a)), 0) }
}
// --- vclt_* (vector forms): lane-wise "a < b" wrappers over `simd_lt`.
// Codegen asserts the reversed-operand instructions FCMGT/CMGT/CMHI, since
// AArch64 encodes a<b as b>a. ---
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_lt(a, b) }
}
2696#[doc = "Compare less than"]
2697#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"]
2698#[inline(always)]
2699#[target_feature(enable = "neon")]
2700#[cfg_attr(test, assert_instr(cmp))]
2701#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2702pub fn vcltd_u64(a: u64, b: u64) -> u64 {
2703    unsafe { transmute(vclt_u64(transmute(a), transmute(b))) }
2704}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_s64(a: i64, b: i64) -> u64 {
    // Signed scalar form: same 1-lane round-trip as vcltd_u64, delegating to
    // the signed vector compare; result is a u64 mask (all ones on true).
    unsafe { transmute(vclt_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vclth_f16(a: f16, b: f16) -> u16 {
    // Scalar f16 compare expressed through the vector path: splat both scalars
    // with vdup, compare lane-wise, then take lane 0 of the mask result.
    unsafe { simd_extract!(vclt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclts_f32(a: f32, b: f32) -> u32 {
    // Scalar f32 compare via splat-compare-extract: lane 0 of the vector
    // compare mask is the scalar result (0 or all ones).
    unsafe { simd_extract!(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_f64(a: f64, b: f64) -> u64 {
    // Scalar f64 compare via splat-compare-extract on the 1-lane vector form.
    unsafe { simd_extract!(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_f32(a: float32x2_t) -> uint32x2_t {
    // Compare each lane against an all-zero vector; the zero constant is built
    // as a packed f32x2 and reinterpreted into the NEON vector type.
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_f32(a: float32x4_t) -> uint32x4_t {
    // 128-bit variant: lane-wise a < 0.0 against a packed zero constant.
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane case: a scalar 0.0f64 is the same 64 bits as float64x1_t,
    // so it is transmuted directly into the vector zero operand.
    let b: f64 = 0.0;
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_f64(a: float64x2_t) -> uint64x2_t {
    // Lane-wise a < 0.0 for two f64 lanes; zero vector built then reinterpreted.
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
    // Signed lane-wise a < 0 against a packed zero constant; lowers to CMLT.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s8(a: int8x16_t) -> uint8x16_t {
    // 128-bit variant of vcltz_s8: 16 signed byte lanes compared against zero.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s16(a: int16x4_t) -> uint16x4_t {
    // Signed lane-wise a < 0 for four i16 lanes.
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s16(a: int16x8_t) -> uint16x8_t {
    // 128-bit variant: eight i16 lanes compared against zero.
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s32(a: int32x2_t) -> uint32x2_t {
    // Signed lane-wise a < 0 for two i32 lanes.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s32(a: int32x4_t) -> uint32x4_t {
    // 128-bit variant: four i32 lanes compared against zero.
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s64(a: int64x1_t) -> uint64x1_t {
    // Single-lane signed a < 0; zero is a 1-lane packed constant.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s64(a: int64x2_t) -> uint64x2_t {
    // 128-bit variant: two i64 lanes compared against zero.
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzd_f64(a: f64) -> u64 {
    // Scalar f64 < 0 via splat into a vector, vector compare, extract lane 0.
    unsafe { simd_extract!(vcltz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzs_f32(a: f32) -> u32 {
    // Scalar f32 < 0 via splat-compare-extract on the vector form.
    unsafe { simd_extract!(vcltz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(asr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzd_s64(a: i64) -> u64 {
    // Scalar i64 < 0 via the 1-lane vector compare. Note the assert above:
    // the compiler lowers this to a plain ASR (broadcast of the sign bit),
    // not a vector compare instruction.
    unsafe { transmute(vcltz_s64(transmute(a))) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcltzh_f16(a: f16) -> u16 {
    // Scalar f16 < 0 via splat-compare-extract on the vector form.
    unsafe { simd_extract!(vcltz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Direct binding to the LLVM intrinsic for FCMLA with rotation 0
    // (see the `rot0` suffix in the link_name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f16"
        )]
        fn _vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // 128-bit variant of vcmla_f16: LLVM FCMLA intrinsic, rotation 0, v8f16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v8f16"
        )]
        fn _vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // LLVM FCMLA intrinsic, rotation 0, f32 lanes (one complex number).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32"
        )]
        fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // 128-bit variant of vcmla_f32: LLVM FCMLA intrinsic, rotation 0, v4f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32"
        )]
        fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // LLVM FCMLA intrinsic, rotation 0, f64 lanes (one complex number).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64"
        )]
        fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // LANE selects one complex (real, imag) pair of `c`; 4 f16 lanes hold
    // 2 pairs, hence the 1-bit lane-index bound.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE, 2*LANE+1) across the
        // whole vector, then delegate to the non-lane FCMLA form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` is the narrow (64-bit) vector: 2 complex pairs, so LANE is 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Widen by repeating the selected (real, imag) pair into all four
        // pair slots of a 128-bit vector, then use the quad FCMLA form.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // A 64-bit f32 vector holds exactly one complex pair, so only LANE == 0
    // is valid; the shuffle below is then the identity.
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // Narrow `c` holds a single complex pair, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // Repeat that pair into both pair slots of a 128-bit vector.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `laneq`: `c` is the wide (128-bit) vector with 4 complex pairs,
    // so LANE needs 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow to 64 bits by repeating the selected pair twice.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // Wide `c`: 4 complex pairs, so LANE needs 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected (real, imag) pair into all four pair slots.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // Wide `c`: 2 complex pairs, so LANE needs 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the selected pair into a 64-bit vector, then delegate.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // Wide `c`: 2 complex pairs, so LANE needs 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected (real, imag) pair into both pair slots.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // LLVM FCMLA intrinsic with a 180-degree rotation (`rot180` in link_name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f16"
        )]
        fn _vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // 128-bit variant: LLVM FCMLA intrinsic, 180-degree rotation, v8f16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v8f16"
        )]
        fn _vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // LLVM FCMLA intrinsic, 180-degree rotation, f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32"
        )]
        fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // 128-bit variant: LLVM FCMLA intrinsic, 180-degree rotation, v4f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32"
        )]
        fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // LLVM FCMLA intrinsic, 180-degree rotation, f64 lanes (one complex number).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64"
        )]
        fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot180_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot180_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // Same lane-selection scheme as vcmla_lane_f16 (2 complex pairs -> 1 bit),
    // but delegating to the 180-degree-rotation FCMLA form.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected (real, imag) pair across the whole vector.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot180_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // Narrow `c` (2 complex pairs -> 1-bit LANE); widened by broadcasting the
    // selected pair into all four pair slots before the rot180 FCMLA.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // One complex pair in `c`, so only LANE == 0 is valid; the shuffle is
    // then the identity and this delegates to the rot180 FCMLA form.
    static_assert!(LANE == 0);
    unsafe {
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // Narrow `c` holds one complex pair, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // Repeat the pair into both pair slots of a 128-bit vector.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot180_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `laneq`: wide `c` has 4 complex pairs, so LANE needs 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow to 64 bits by repeating the selected pair, then rot180 FCMLA.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot180_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // Wide `c`: 4 complex pairs, so LANE needs 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected pair into all four pair slots.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // Wide `c`: 2 complex pairs, so LANE needs 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the selected pair into a 64-bit vector, then rot180 FCMLA.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` (float32x4_t) holds 2 complex (re, im) pairs, so LANE is 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected complex pair of `c` into both complex lanes,
        // then delegate to the non-lane intrinsic.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Thin wrapper over the LLVM intrinsic; the 270-degree rotation variant is
    // selected by the `rot270` component of the link name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f16"
        )]
        fn _vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Thin wrapper over the LLVM intrinsic (128-bit `q` form, v8f16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v8f16"
        )]
        fn _vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Thin wrapper over the LLVM intrinsic (64-bit form, v2f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32"
        )]
        fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Thin wrapper over the LLVM intrinsic (128-bit `q` form, v4f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32"
        )]
        fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM intrinsic (128-bit `q` form, v2f64; one
    // complex pair per vector, so no 64-bit `d` form exists).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64"
        )]
        fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot270_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot270_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` (float16x4_t) holds 2 complex (re, im) pairs, so LANE is 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected complex pair of `c` into both complex lanes,
        // then delegate to the non-lane intrinsic.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot270_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` (float16x4_t) holds 2 complex (re, im) pairs, so LANE is 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Widen `c` by broadcasting the selected complex pair into all four
        // complex lanes of a float16x8_t, then delegate to the `q` intrinsic.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot270_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` (float32x2_t) holds exactly one complex (re, im) pair, so the only
    // valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe {
        // With LANE fixed at 0 this shuffle is the identity; it is kept for
        // uniformity with the other lane variants.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot270_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // `c` (float32x2_t) holds exactly one complex (re, im) pair, so the only
    // valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe {
        // Widen `c` by broadcasting its single complex pair into both complex
        // lanes of a float32x4_t, then delegate to the `q` intrinsic.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot270_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `c` (float16x8_t) holds 4 complex (re, im) pairs, so LANE is 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow `c` by broadcasting the selected complex pair into both
        // complex lanes of a float16x4_t, then delegate to the non-lane form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot270_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` (float16x8_t) holds 4 complex (re, im) pairs, so LANE is 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected complex pair of `c` into all four complex
        // lanes, then delegate to the non-lane intrinsic.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot270_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // `c` (float32x4_t) holds 2 complex (re, im) pairs, so LANE is 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow `c` to the selected complex pair, then delegate to the
        // non-lane intrinsic.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot270_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` (float32x4_t) holds 2 complex (re, im) pairs, so LANE is 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected complex pair of `c` into both complex lanes,
        // then delegate to the non-lane intrinsic.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Thin wrapper over the LLVM intrinsic; the 90-degree rotation variant is
    // selected by the `rot90` component of the link name.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f16"
        )]
        fn _vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    unsafe { _vcmla_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Thin wrapper over the LLVM intrinsic (128-bit `q` form, v8f16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v8f16"
        )]
        fn _vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    unsafe { _vcmlaq_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Thin wrapper over the LLVM intrinsic (64-bit form, v2f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32"
        )]
        fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    unsafe { _vcmla_rot90_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Thin wrapper over the LLVM intrinsic (128-bit `q` form, v4f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32"
        )]
        fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    unsafe { _vcmlaq_rot90_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM intrinsic (128-bit `q` form, v2f64; one
    // complex pair per vector, so no 64-bit `d` form exists).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64"
        )]
        fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    unsafe { _vcmlaq_rot90_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot90_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` (float16x4_t) holds 2 complex (re, im) pairs, so LANE is 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected complex pair of `c` into both complex lanes,
        // then delegate to the non-lane intrinsic.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot90_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` (float16x4_t) holds 2 complex (re, im) pairs, so LANE is 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Widen `c` by broadcasting the selected complex pair into all four
        // complex lanes of a float16x8_t, then delegate to the `q` intrinsic.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` (float32x2_t) holds exactly one complex (re, im) pair, so the only
    // valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe {
        // With LANE fixed at 0 this shuffle is the identity; it is kept for
        // uniformity with the other lane variants.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // `c` (float32x2_t) holds exactly one complex (re, im) pair, so the only
    // valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe {
        // Widen `c` by broadcasting its single complex pair into both complex
        // lanes of a float32x4_t, then delegate to the `q` intrinsic.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot90_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `c` (float16x8_t) holds 4 complex (re, im) pairs, so LANE is 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow `c` by broadcasting the selected complex pair into both
        // complex lanes of a float16x4_t, then delegate to the non-lane form.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot90_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` (float16x8_t) holds 4 complex (re, im) pairs, so LANE is 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected complex pair of `c` into all four complex
        // lanes, then delegate to the non-lane intrinsic.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // `c` (float32x4_t) holds 2 complex (re, im) pairs, so LANE is 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow `c` to the selected complex pair, then delegate to the
        // non-lane intrinsic.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` (float32x4_t) holds 2 complex (re, im) pairs, so LANE is 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected complex pair of `c` into both complex lanes,
        // then delegate to the non-lane intrinsic.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x2_t,
) -> float32x2_t {
    // Both lane indices address a 2-element vector, so each is 1 bit.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // simd_shuffle! needs a literal index array, so there is one match arm
        // per LANE1 value. In the combined (a, b) index space, `2 + LANE2`
        // addresses element LANE2 of `b`; all other lanes are kept from `a`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // LANE1 was masked to 1 bit, so no other value is possible.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Both lane indices address an 8-element vector, so each is 3 bits.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // simd_shuffle! needs a literal index array, so there is one match arm
        // per LANE1 value. In the combined (a, b) index space, `8 + LANE2`
        // addresses element LANE2 of `b`; all other lanes are kept from `a`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // LANE1 was masked to 3 bits, so no other value is possible.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // Both lane indices address a 4-element vector, so each is 2 bits.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // simd_shuffle! needs a literal index array, so there is one match arm
        // per LANE1 value. In the combined (a, b) index space, `4 + LANE2`
        // addresses element LANE2 of `b`; all other lanes are kept from `a`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // LANE1 was masked to 2 bits, so no other value is possible.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Both lane indices address a 2-element vector, so each is 1 bit.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // simd_shuffle! needs a literal index array, so there is one match arm
        // per LANE1 value. In the combined (a, b) index space, `2 + LANE2`
        // addresses element LANE2 of `b`; all other lanes are kept from `a`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // LANE1 was masked to 1 bit, so no other value is possible.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // Both lane indices address an 8-element vector, so each is 3 bits.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // simd_shuffle! needs a literal index array, so there is one match arm
        // per LANE1 value. In the combined (a, b) index space, `8 + LANE2`
        // addresses element LANE2 of `b`; all other lanes are kept from `a`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // LANE1 was masked to 3 bits, so no other value is possible.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x4_t,
) -> uint16x4_t {
    // Both lane indices address a 4-element vector, so each is 2 bits.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // simd_shuffle! needs a literal index array, so there is one match arm
        // per LANE1 value. In the combined (a, b) index space, `4 + LANE2`
        // addresses element LANE2 of `b`; all other lanes are kept from `a`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // LANE1 was masked to 2 bits, so no other value is possible.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x2_t,
) -> uint32x2_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of `b`.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 2..4 address `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b1` is always 0 or 1, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of `b`.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 8..16 address `b`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b111` is always in 0..=7, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x4_t,
) -> poly16x4_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of `b`.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 4..8 address `b`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b11` is always in 0..=3, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x4_t,
) -> float32x2_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` to 4 lanes so both shuffle operands have equal lane counts;
    // only lanes 0..2 of the widened `a` are ever selected below.
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 4..8 address `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b1` is always 0 or 1, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x16_t) -> int8x8_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to 16 lanes so both shuffle operands have equal lane counts;
    // only lanes 0..8 of the widened `a` are ever selected below.
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 16..32 address `b`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b111` is always in 0..=7, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x4_t,
    b: int16x8_t,
) -> int16x4_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to 8 lanes so both shuffle operands have equal lane counts;
    // only lanes 0..4 of the widened `a` are ever selected below.
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 8..16 address `b`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b11` is always in 0..=3, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x2_t,
    b: int32x4_t,
) -> int32x2_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` to 4 lanes so both shuffle operands have equal lane counts;
    // only lanes 0..2 of the widened `a` are ever selected below.
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 4..8 address `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b1` is always 0 or 1, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x8_t,
    b: uint8x16_t,
) -> uint8x8_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to 16 lanes so both shuffle operands have equal lane counts;
    // only lanes 0..8 of the widened `a` are ever selected below.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 16..32 address `b`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b111` is always in 0..=7, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x8_t,
) -> uint16x4_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to 8 lanes so both shuffle operands have equal lane counts;
    // only lanes 0..4 of the widened `a` are ever selected below.
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 8..16 address `b`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b11` is always in 0..=3, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x4_t,
) -> uint32x2_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `a` to 4 lanes so both shuffle operands have equal lane counts;
    // only lanes 0..2 of the widened `a` are ever selected below.
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 4..8 address `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b1` is always 0 or 1, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x8_t,
    b: poly8x16_t,
) -> poly8x8_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 4);
    // Widen `a` to 16 lanes so both shuffle operands have equal lane counts;
    // only lanes 0..8 of the widened `a` are ever selected below.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 16..32 address `b`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b111` is always in 0..=7, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x8_t,
) -> poly16x4_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of the 128-bit `b`.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 3);
    // Widen `a` to 8 lanes so both shuffle operands have equal lane counts;
    // only lanes 0..4 of the widened `a` are ever selected below.
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 8..16 address `b`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b11` is always in 0..=3, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x2_t,
) -> float32x4_t {
    // Returns the 128-bit `a` with lane LANE1 replaced by lane LANE2 of `b`.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen `b` to 4 lanes so both shuffle operands have equal lane counts;
    // only lanes 0..2 of the widened `b` are ever selected below.
    let b: float32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 4..8 address `b`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b11` is always in 0..=3, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x1_t,
) -> float64x2_t {
    // Returns the 128-bit `a` with lane LANE1 replaced by the single lane of `b`.
    static_assert_uimm_bits!(LANE1, 1);
    // `b` has exactly one lane, so LANE2 must be 0.
    static_assert!(LANE2 == 0);
    // Widen `b` to 2 lanes so both shuffle operands have equal lane counts;
    // only lane 0 of the widened `b` is ever selected below.
    let b: float64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 2..4 address `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b1` is always 0 or 1, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x1_t,
) -> int64x2_t {
    // Returns the 128-bit `a` with lane LANE1 replaced by the single lane of `b`.
    static_assert_uimm_bits!(LANE1, 1);
    // `b` has exactly one lane, so LANE2 must be 0.
    static_assert!(LANE2 == 0);
    // Widen `b` to 2 lanes so both shuffle operands have equal lane counts;
    // only lane 0 of the widened `b` is ever selected below.
    let b: int64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 2..4 address `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b1` is always 0 or 1, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x1_t,
) -> uint64x2_t {
    // Returns the 128-bit `a` with lane LANE1 replaced by the single lane of `b`.
    static_assert_uimm_bits!(LANE1, 1);
    // `b` has exactly one lane, so LANE2 must be 0.
    static_assert!(LANE2 == 0);
    // Widen `b` to 2 lanes so both shuffle operands have equal lane counts;
    // only lane 0 of the widened `b` is ever selected below.
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 2..4 address `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b1` is always 0 or 1, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x1_t,
) -> poly64x2_t {
    // Returns the 128-bit `a` with lane LANE1 replaced by the single lane of `b`.
    static_assert_uimm_bits!(LANE1, 1);
    // `b` has exactly one lane, so LANE2 must be 0.
    static_assert!(LANE2 == 0);
    // Widen `b` to 2 lanes so both shuffle operands have equal lane counts;
    // only lane 0 of the widened `b` is ever selected below.
    let b: poly64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 2..4 address `b`.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b1` is always 0 or 1, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
4623#[doc = "Insert vector element from another vector element"]
4624#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"]
4625#[inline(always)]
4626#[target_feature(enable = "neon")]
4627#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4628#[rustc_legacy_const_generics(1, 3)]
4629#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4630pub fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x8_t) -> int8x16_t {
4631    static_assert_uimm_bits!(LANE1, 4);
4632    static_assert_uimm_bits!(LANE2, 3);
4633    let b: int8x16_t =
4634        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
4635    unsafe {
4636        match LANE1 & 0b1111 {
4637            0 => simd_shuffle!(
4638                a,
4639                b,
4640                [
4641                    16 + LANE2 as u32,
4642                    1,
4643                    2,
4644                    3,
4645                    4,
4646                    5,
4647                    6,
4648                    7,
4649                    8,
4650                    9,
4651                    10,
4652                    11,
4653                    12,
4654                    13,
4655                    14,
4656                    15
4657                ]
4658            ),
4659            1 => simd_shuffle!(
4660                a,
4661                b,
4662                [
4663                    0,
4664                    16 + LANE2 as u32,
4665                    2,
4666                    3,
4667                    4,
4668                    5,
4669                    6,
4670                    7,
4671                    8,
4672                    9,
4673                    10,
4674                    11,
4675                    12,
4676                    13,
4677                    14,
4678                    15
4679                ]
4680            ),
4681            2 => simd_shuffle!(
4682                a,
4683                b,
4684                [
4685                    0,
4686                    1,
4687                    16 + LANE2 as u32,
4688                    3,
4689                    4,
4690                    5,
4691                    6,
4692                    7,
4693                    8,
4694                    9,
4695                    10,
4696                    11,
4697                    12,
4698                    13,
4699                    14,
4700                    15
4701                ]
4702            ),
4703            3 => simd_shuffle!(
4704                a,
4705                b,
4706                [
4707                    0,
4708                    1,
4709                    2,
4710                    16 + LANE2 as u32,
4711                    4,
4712                    5,
4713                    6,
4714                    7,
4715                    8,
4716                    9,
4717                    10,
4718                    11,
4719                    12,
4720                    13,
4721                    14,
4722                    15
4723                ]
4724            ),
4725            4 => simd_shuffle!(
4726                a,
4727                b,
4728                [
4729                    0,
4730                    1,
4731                    2,
4732                    3,
4733                    16 + LANE2 as u32,
4734                    5,
4735                    6,
4736                    7,
4737                    8,
4738                    9,
4739                    10,
4740                    11,
4741                    12,
4742                    13,
4743                    14,
4744                    15
4745                ]
4746            ),
4747            5 => simd_shuffle!(
4748                a,
4749                b,
4750                [
4751                    0,
4752                    1,
4753                    2,
4754                    3,
4755                    4,
4756                    16 + LANE2 as u32,
4757                    6,
4758                    7,
4759                    8,
4760                    9,
4761                    10,
4762                    11,
4763                    12,
4764                    13,
4765                    14,
4766                    15
4767                ]
4768            ),
4769            6 => simd_shuffle!(
4770                a,
4771                b,
4772                [
4773                    0,
4774                    1,
4775                    2,
4776                    3,
4777                    4,
4778                    5,
4779                    16 + LANE2 as u32,
4780                    7,
4781                    8,
4782                    9,
4783                    10,
4784                    11,
4785                    12,
4786                    13,
4787                    14,
4788                    15
4789                ]
4790            ),
4791            7 => simd_shuffle!(
4792                a,
4793                b,
4794                [
4795                    0,
4796                    1,
4797                    2,
4798                    3,
4799                    4,
4800                    5,
4801                    6,
4802                    16 + LANE2 as u32,
4803                    8,
4804                    9,
4805                    10,
4806                    11,
4807                    12,
4808                    13,
4809                    14,
4810                    15
4811                ]
4812            ),
4813            8 => simd_shuffle!(
4814                a,
4815                b,
4816                [
4817                    0,
4818                    1,
4819                    2,
4820                    3,
4821                    4,
4822                    5,
4823                    6,
4824                    7,
4825                    16 + LANE2 as u32,
4826                    9,
4827                    10,
4828                    11,
4829                    12,
4830                    13,
4831                    14,
4832                    15
4833                ]
4834            ),
4835            9 => simd_shuffle!(
4836                a,
4837                b,
4838                [
4839                    0,
4840                    1,
4841                    2,
4842                    3,
4843                    4,
4844                    5,
4845                    6,
4846                    7,
4847                    8,
4848                    16 + LANE2 as u32,
4849                    10,
4850                    11,
4851                    12,
4852                    13,
4853                    14,
4854                    15
4855                ]
4856            ),
4857            10 => simd_shuffle!(
4858                a,
4859                b,
4860                [
4861                    0,
4862                    1,
4863                    2,
4864                    3,
4865                    4,
4866                    5,
4867                    6,
4868                    7,
4869                    8,
4870                    9,
4871                    16 + LANE2 as u32,
4872                    11,
4873                    12,
4874                    13,
4875                    14,
4876                    15
4877                ]
4878            ),
4879            11 => simd_shuffle!(
4880                a,
4881                b,
4882                [
4883                    0,
4884                    1,
4885                    2,
4886                    3,
4887                    4,
4888                    5,
4889                    6,
4890                    7,
4891                    8,
4892                    9,
4893                    10,
4894                    16 + LANE2 as u32,
4895                    12,
4896                    13,
4897                    14,
4898                    15
4899                ]
4900            ),
4901            12 => simd_shuffle!(
4902                a,
4903                b,
4904                [
4905                    0,
4906                    1,
4907                    2,
4908                    3,
4909                    4,
4910                    5,
4911                    6,
4912                    7,
4913                    8,
4914                    9,
4915                    10,
4916                    11,
4917                    16 + LANE2 as u32,
4918                    13,
4919                    14,
4920                    15
4921                ]
4922            ),
4923            13 => simd_shuffle!(
4924                a,
4925                b,
4926                [
4927                    0,
4928                    1,
4929                    2,
4930                    3,
4931                    4,
4932                    5,
4933                    6,
4934                    7,
4935                    8,
4936                    9,
4937                    10,
4938                    11,
4939                    12,
4940                    16 + LANE2 as u32,
4941                    14,
4942                    15
4943                ]
4944            ),
4945            14 => simd_shuffle!(
4946                a,
4947                b,
4948                [
4949                    0,
4950                    1,
4951                    2,
4952                    3,
4953                    4,
4954                    5,
4955                    6,
4956                    7,
4957                    8,
4958                    9,
4959                    10,
4960                    11,
4961                    12,
4962                    13,
4963                    16 + LANE2 as u32,
4964                    15
4965                ]
4966            ),
4967            15 => simd_shuffle!(
4968                a,
4969                b,
4970                [
4971                    0,
4972                    1,
4973                    2,
4974                    3,
4975                    4,
4976                    5,
4977                    6,
4978                    7,
4979                    8,
4980                    9,
4981                    10,
4982                    11,
4983                    12,
4984                    13,
4985                    14,
4986                    16 + LANE2 as u32
4987                ]
4988            ),
4989            _ => unreachable_unchecked(),
4990        }
4991    }
4992}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x4_t,
) -> int16x8_t {
    // Returns the 128-bit `a` with lane LANE1 replaced by lane LANE2 of `b`.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 2);
    // Widen `b` to 8 lanes so both shuffle operands have equal lane counts;
    // only lanes 0..4 of the widened `b` are ever selected below.
    let b: int16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 8..16 address `b`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b111` is always in 0..=7, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x2_t,
) -> int32x4_t {
    // Returns the 128-bit `a` with lane LANE1 replaced by lane LANE2 of `b`.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 1);
    // Widen `b` to 4 lanes so both shuffle operands have equal lane counts;
    // only lanes 0..2 of the widened `b` are ever selected below.
    let b: int32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    unsafe {
        // `simd_shuffle!` requires const index arrays, so dispatch on LANE1.
        // In the concatenated (a, b) shuffle operand, indices 4..8 address `b`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b11` is always in 0..=3, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
5045#[doc = "Insert vector element from another vector element"]
5046#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"]
5047#[inline(always)]
5048#[target_feature(enable = "neon")]
5049#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5050#[rustc_legacy_const_generics(1, 3)]
5051#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5052pub fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(
5053    a: uint8x16_t,
5054    b: uint8x8_t,
5055) -> uint8x16_t {
5056    static_assert_uimm_bits!(LANE1, 4);
5057    static_assert_uimm_bits!(LANE2, 3);
5058    let b: uint8x16_t =
5059        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
5060    unsafe {
5061        match LANE1 & 0b1111 {
5062            0 => simd_shuffle!(
5063                a,
5064                b,
5065                [
5066                    16 + LANE2 as u32,
5067                    1,
5068                    2,
5069                    3,
5070                    4,
5071                    5,
5072                    6,
5073                    7,
5074                    8,
5075                    9,
5076                    10,
5077                    11,
5078                    12,
5079                    13,
5080                    14,
5081                    15
5082                ]
5083            ),
5084            1 => simd_shuffle!(
5085                a,
5086                b,
5087                [
5088                    0,
5089                    16 + LANE2 as u32,
5090                    2,
5091                    3,
5092                    4,
5093                    5,
5094                    6,
5095                    7,
5096                    8,
5097                    9,
5098                    10,
5099                    11,
5100                    12,
5101                    13,
5102                    14,
5103                    15
5104                ]
5105            ),
5106            2 => simd_shuffle!(
5107                a,
5108                b,
5109                [
5110                    0,
5111                    1,
5112                    16 + LANE2 as u32,
5113                    3,
5114                    4,
5115                    5,
5116                    6,
5117                    7,
5118                    8,
5119                    9,
5120                    10,
5121                    11,
5122                    12,
5123                    13,
5124                    14,
5125                    15
5126                ]
5127            ),
5128            3 => simd_shuffle!(
5129                a,
5130                b,
5131                [
5132                    0,
5133                    1,
5134                    2,
5135                    16 + LANE2 as u32,
5136                    4,
5137                    5,
5138                    6,
5139                    7,
5140                    8,
5141                    9,
5142                    10,
5143                    11,
5144                    12,
5145                    13,
5146                    14,
5147                    15
5148                ]
5149            ),
5150            4 => simd_shuffle!(
5151                a,
5152                b,
5153                [
5154                    0,
5155                    1,
5156                    2,
5157                    3,
5158                    16 + LANE2 as u32,
5159                    5,
5160                    6,
5161                    7,
5162                    8,
5163                    9,
5164                    10,
5165                    11,
5166                    12,
5167                    13,
5168                    14,
5169                    15
5170                ]
5171            ),
5172            5 => simd_shuffle!(
5173                a,
5174                b,
5175                [
5176                    0,
5177                    1,
5178                    2,
5179                    3,
5180                    4,
5181                    16 + LANE2 as u32,
5182                    6,
5183                    7,
5184                    8,
5185                    9,
5186                    10,
5187                    11,
5188                    12,
5189                    13,
5190                    14,
5191                    15
5192                ]
5193            ),
5194            6 => simd_shuffle!(
5195                a,
5196                b,
5197                [
5198                    0,
5199                    1,
5200                    2,
5201                    3,
5202                    4,
5203                    5,
5204                    16 + LANE2 as u32,
5205                    7,
5206                    8,
5207                    9,
5208                    10,
5209                    11,
5210                    12,
5211                    13,
5212                    14,
5213                    15
5214                ]
5215            ),
5216            7 => simd_shuffle!(
5217                a,
5218                b,
5219                [
5220                    0,
5221                    1,
5222                    2,
5223                    3,
5224                    4,
5225                    5,
5226                    6,
5227                    16 + LANE2 as u32,
5228                    8,
5229                    9,
5230                    10,
5231                    11,
5232                    12,
5233                    13,
5234                    14,
5235                    15
5236                ]
5237            ),
5238            8 => simd_shuffle!(
5239                a,
5240                b,
5241                [
5242                    0,
5243                    1,
5244                    2,
5245                    3,
5246                    4,
5247                    5,
5248                    6,
5249                    7,
5250                    16 + LANE2 as u32,
5251                    9,
5252                    10,
5253                    11,
5254                    12,
5255                    13,
5256                    14,
5257                    15
5258                ]
5259            ),
5260            9 => simd_shuffle!(
5261                a,
5262                b,
5263                [
5264                    0,
5265                    1,
5266                    2,
5267                    3,
5268                    4,
5269                    5,
5270                    6,
5271                    7,
5272                    8,
5273                    16 + LANE2 as u32,
5274                    10,
5275                    11,
5276                    12,
5277                    13,
5278                    14,
5279                    15
5280                ]
5281            ),
5282            10 => simd_shuffle!(
5283                a,
5284                b,
5285                [
5286                    0,
5287                    1,
5288                    2,
5289                    3,
5290                    4,
5291                    5,
5292                    6,
5293                    7,
5294                    8,
5295                    9,
5296                    16 + LANE2 as u32,
5297                    11,
5298                    12,
5299                    13,
5300                    14,
5301                    15
5302                ]
5303            ),
5304            11 => simd_shuffle!(
5305                a,
5306                b,
5307                [
5308                    0,
5309                    1,
5310                    2,
5311                    3,
5312                    4,
5313                    5,
5314                    6,
5315                    7,
5316                    8,
5317                    9,
5318                    10,
5319                    16 + LANE2 as u32,
5320                    12,
5321                    13,
5322                    14,
5323                    15
5324                ]
5325            ),
5326            12 => simd_shuffle!(
5327                a,
5328                b,
5329                [
5330                    0,
5331                    1,
5332                    2,
5333                    3,
5334                    4,
5335                    5,
5336                    6,
5337                    7,
5338                    8,
5339                    9,
5340                    10,
5341                    11,
5342                    16 + LANE2 as u32,
5343                    13,
5344                    14,
5345                    15
5346                ]
5347            ),
5348            13 => simd_shuffle!(
5349                a,
5350                b,
5351                [
5352                    0,
5353                    1,
5354                    2,
5355                    3,
5356                    4,
5357                    5,
5358                    6,
5359                    7,
5360                    8,
5361                    9,
5362                    10,
5363                    11,
5364                    12,
5365                    16 + LANE2 as u32,
5366                    14,
5367                    15
5368                ]
5369            ),
5370            14 => simd_shuffle!(
5371                a,
5372                b,
5373                [
5374                    0,
5375                    1,
5376                    2,
5377                    3,
5378                    4,
5379                    5,
5380                    6,
5381                    7,
5382                    8,
5383                    9,
5384                    10,
5385                    11,
5386                    12,
5387                    13,
5388                    16 + LANE2 as u32,
5389                    15
5390                ]
5391            ),
5392            15 => simd_shuffle!(
5393                a,
5394                b,
5395                [
5396                    0,
5397                    1,
5398                    2,
5399                    3,
5400                    4,
5401                    5,
5402                    6,
5403                    7,
5404                    8,
5405                    9,
5406                    10,
5407                    11,
5408                    12,
5409                    13,
5410                    14,
5411                    16 + LANE2 as u32
5412                ]
5413            ),
5414            _ => unreachable_unchecked(),
5415        }
5416    }
5417}
5418#[doc = "Insert vector element from another vector element"]
5419#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"]
5420#[inline(always)]
5421#[target_feature(enable = "neon")]
5422#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5423#[rustc_legacy_const_generics(1, 3)]
5424#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5425pub fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(
5426    a: uint16x8_t,
5427    b: uint16x4_t,
5428) -> uint16x8_t {
5429    static_assert_uimm_bits!(LANE1, 3);
5430    static_assert_uimm_bits!(LANE2, 2);
5431    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
5432    unsafe {
5433        match LANE1 & 0b111 {
5434            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
5435            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
5436            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
5437            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
5438            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
5439            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
5440            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
5441            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
5442            _ => unreachable_unchecked(),
5443        }
5444    }
5445}
5446#[doc = "Insert vector element from another vector element"]
5447#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"]
5448#[inline(always)]
5449#[target_feature(enable = "neon")]
5450#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5451#[rustc_legacy_const_generics(1, 3)]
5452#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5453pub fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(
5454    a: uint32x4_t,
5455    b: uint32x2_t,
5456) -> uint32x4_t {
5457    static_assert_uimm_bits!(LANE1, 2);
5458    static_assert_uimm_bits!(LANE2, 1);
5459    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
5460    unsafe {
5461        match LANE1 & 0b11 {
5462            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
5463            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
5464            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
5465            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
5466            _ => unreachable_unchecked(),
5467        }
5468    }
5469}
5470#[doc = "Insert vector element from another vector element"]
5471#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"]
5472#[inline(always)]
5473#[target_feature(enable = "neon")]
5474#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5475#[rustc_legacy_const_generics(1, 3)]
5476#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5477pub fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(
5478    a: poly8x16_t,
5479    b: poly8x8_t,
5480) -> poly8x16_t {
5481    static_assert_uimm_bits!(LANE1, 4);
5482    static_assert_uimm_bits!(LANE2, 3);
5483    let b: poly8x16_t =
5484        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
5485    unsafe {
5486        match LANE1 & 0b1111 {
5487            0 => simd_shuffle!(
5488                a,
5489                b,
5490                [
5491                    16 + LANE2 as u32,
5492                    1,
5493                    2,
5494                    3,
5495                    4,
5496                    5,
5497                    6,
5498                    7,
5499                    8,
5500                    9,
5501                    10,
5502                    11,
5503                    12,
5504                    13,
5505                    14,
5506                    15
5507                ]
5508            ),
5509            1 => simd_shuffle!(
5510                a,
5511                b,
5512                [
5513                    0,
5514                    16 + LANE2 as u32,
5515                    2,
5516                    3,
5517                    4,
5518                    5,
5519                    6,
5520                    7,
5521                    8,
5522                    9,
5523                    10,
5524                    11,
5525                    12,
5526                    13,
5527                    14,
5528                    15
5529                ]
5530            ),
5531            2 => simd_shuffle!(
5532                a,
5533                b,
5534                [
5535                    0,
5536                    1,
5537                    16 + LANE2 as u32,
5538                    3,
5539                    4,
5540                    5,
5541                    6,
5542                    7,
5543                    8,
5544                    9,
5545                    10,
5546                    11,
5547                    12,
5548                    13,
5549                    14,
5550                    15
5551                ]
5552            ),
5553            3 => simd_shuffle!(
5554                a,
5555                b,
5556                [
5557                    0,
5558                    1,
5559                    2,
5560                    16 + LANE2 as u32,
5561                    4,
5562                    5,
5563                    6,
5564                    7,
5565                    8,
5566                    9,
5567                    10,
5568                    11,
5569                    12,
5570                    13,
5571                    14,
5572                    15
5573                ]
5574            ),
5575            4 => simd_shuffle!(
5576                a,
5577                b,
5578                [
5579                    0,
5580                    1,
5581                    2,
5582                    3,
5583                    16 + LANE2 as u32,
5584                    5,
5585                    6,
5586                    7,
5587                    8,
5588                    9,
5589                    10,
5590                    11,
5591                    12,
5592                    13,
5593                    14,
5594                    15
5595                ]
5596            ),
5597            5 => simd_shuffle!(
5598                a,
5599                b,
5600                [
5601                    0,
5602                    1,
5603                    2,
5604                    3,
5605                    4,
5606                    16 + LANE2 as u32,
5607                    6,
5608                    7,
5609                    8,
5610                    9,
5611                    10,
5612                    11,
5613                    12,
5614                    13,
5615                    14,
5616                    15
5617                ]
5618            ),
5619            6 => simd_shuffle!(
5620                a,
5621                b,
5622                [
5623                    0,
5624                    1,
5625                    2,
5626                    3,
5627                    4,
5628                    5,
5629                    16 + LANE2 as u32,
5630                    7,
5631                    8,
5632                    9,
5633                    10,
5634                    11,
5635                    12,
5636                    13,
5637                    14,
5638                    15
5639                ]
5640            ),
5641            7 => simd_shuffle!(
5642                a,
5643                b,
5644                [
5645                    0,
5646                    1,
5647                    2,
5648                    3,
5649                    4,
5650                    5,
5651                    6,
5652                    16 + LANE2 as u32,
5653                    8,
5654                    9,
5655                    10,
5656                    11,
5657                    12,
5658                    13,
5659                    14,
5660                    15
5661                ]
5662            ),
5663            8 => simd_shuffle!(
5664                a,
5665                b,
5666                [
5667                    0,
5668                    1,
5669                    2,
5670                    3,
5671                    4,
5672                    5,
5673                    6,
5674                    7,
5675                    16 + LANE2 as u32,
5676                    9,
5677                    10,
5678                    11,
5679                    12,
5680                    13,
5681                    14,
5682                    15
5683                ]
5684            ),
5685            9 => simd_shuffle!(
5686                a,
5687                b,
5688                [
5689                    0,
5690                    1,
5691                    2,
5692                    3,
5693                    4,
5694                    5,
5695                    6,
5696                    7,
5697                    8,
5698                    16 + LANE2 as u32,
5699                    10,
5700                    11,
5701                    12,
5702                    13,
5703                    14,
5704                    15
5705                ]
5706            ),
5707            10 => simd_shuffle!(
5708                a,
5709                b,
5710                [
5711                    0,
5712                    1,
5713                    2,
5714                    3,
5715                    4,
5716                    5,
5717                    6,
5718                    7,
5719                    8,
5720                    9,
5721                    16 + LANE2 as u32,
5722                    11,
5723                    12,
5724                    13,
5725                    14,
5726                    15
5727                ]
5728            ),
5729            11 => simd_shuffle!(
5730                a,
5731                b,
5732                [
5733                    0,
5734                    1,
5735                    2,
5736                    3,
5737                    4,
5738                    5,
5739                    6,
5740                    7,
5741                    8,
5742                    9,
5743                    10,
5744                    16 + LANE2 as u32,
5745                    12,
5746                    13,
5747                    14,
5748                    15
5749                ]
5750            ),
5751            12 => simd_shuffle!(
5752                a,
5753                b,
5754                [
5755                    0,
5756                    1,
5757                    2,
5758                    3,
5759                    4,
5760                    5,
5761                    6,
5762                    7,
5763                    8,
5764                    9,
5765                    10,
5766                    11,
5767                    16 + LANE2 as u32,
5768                    13,
5769                    14,
5770                    15
5771                ]
5772            ),
5773            13 => simd_shuffle!(
5774                a,
5775                b,
5776                [
5777                    0,
5778                    1,
5779                    2,
5780                    3,
5781                    4,
5782                    5,
5783                    6,
5784                    7,
5785                    8,
5786                    9,
5787                    10,
5788                    11,
5789                    12,
5790                    16 + LANE2 as u32,
5791                    14,
5792                    15
5793                ]
5794            ),
5795            14 => simd_shuffle!(
5796                a,
5797                b,
5798                [
5799                    0,
5800                    1,
5801                    2,
5802                    3,
5803                    4,
5804                    5,
5805                    6,
5806                    7,
5807                    8,
5808                    9,
5809                    10,
5810                    11,
5811                    12,
5812                    13,
5813                    16 + LANE2 as u32,
5814                    15
5815                ]
5816            ),
5817            15 => simd_shuffle!(
5818                a,
5819                b,
5820                [
5821                    0,
5822                    1,
5823                    2,
5824                    3,
5825                    4,
5826                    5,
5827                    6,
5828                    7,
5829                    8,
5830                    9,
5831                    10,
5832                    11,
5833                    12,
5834                    13,
5835                    14,
5836                    16 + LANE2 as u32
5837                ]
5838            ),
5839            _ => unreachable_unchecked(),
5840        }
5841    }
5842}
5843#[doc = "Insert vector element from another vector element"]
5844#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"]
5845#[inline(always)]
5846#[target_feature(enable = "neon")]
5847#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5848#[rustc_legacy_const_generics(1, 3)]
5849#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5850pub fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(
5851    a: poly16x8_t,
5852    b: poly16x4_t,
5853) -> poly16x8_t {
5854    static_assert_uimm_bits!(LANE1, 3);
5855    static_assert_uimm_bits!(LANE2, 2);
5856    let b: poly16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
5857    unsafe {
5858        match LANE1 & 0b111 {
5859            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
5860            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
5861            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
5862            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
5863            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
5864            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
5865            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
5866            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
5867            _ => unreachable_unchecked(),
5868        }
5869    }
5870}
5871#[doc = "Insert vector element from another vector element"]
5872#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"]
5873#[inline(always)]
5874#[target_feature(enable = "neon")]
5875#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5876#[rustc_legacy_const_generics(1, 3)]
5877#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5878pub fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(
5879    a: float32x4_t,
5880    b: float32x4_t,
5881) -> float32x4_t {
5882    static_assert_uimm_bits!(LANE1, 2);
5883    static_assert_uimm_bits!(LANE2, 2);
5884    unsafe {
5885        match LANE1 & 0b11 {
5886            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
5887            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
5888            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
5889            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
5890            _ => unreachable_unchecked(),
5891        }
5892    }
5893}
5894#[doc = "Insert vector element from another vector element"]
5895#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"]
5896#[inline(always)]
5897#[target_feature(enable = "neon")]
5898#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5899#[rustc_legacy_const_generics(1, 3)]
5900#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5901pub fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(
5902    a: float64x2_t,
5903    b: float64x2_t,
5904) -> float64x2_t {
5905    static_assert_uimm_bits!(LANE1, 1);
5906    static_assert_uimm_bits!(LANE2, 1);
5907    unsafe {
5908        match LANE1 & 0b1 {
5909            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
5910            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
5911            _ => unreachable_unchecked(),
5912        }
5913    }
5914}
5915#[doc = "Insert vector element from another vector element"]
5916#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"]
5917#[inline(always)]
5918#[target_feature(enable = "neon")]
5919#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5920#[rustc_legacy_const_generics(1, 3)]
5921#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5922pub fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(
5923    a: int8x16_t,
5924    b: int8x16_t,
5925) -> int8x16_t {
5926    static_assert_uimm_bits!(LANE1, 4);
5927    static_assert_uimm_bits!(LANE2, 4);
5928    unsafe {
5929        match LANE1 & 0b1111 {
5930            0 => simd_shuffle!(
5931                a,
5932                b,
5933                [
5934                    16 + LANE2 as u32,
5935                    1,
5936                    2,
5937                    3,
5938                    4,
5939                    5,
5940                    6,
5941                    7,
5942                    8,
5943                    9,
5944                    10,
5945                    11,
5946                    12,
5947                    13,
5948                    14,
5949                    15
5950                ]
5951            ),
5952            1 => simd_shuffle!(
5953                a,
5954                b,
5955                [
5956                    0,
5957                    16 + LANE2 as u32,
5958                    2,
5959                    3,
5960                    4,
5961                    5,
5962                    6,
5963                    7,
5964                    8,
5965                    9,
5966                    10,
5967                    11,
5968                    12,
5969                    13,
5970                    14,
5971                    15
5972                ]
5973            ),
5974            2 => simd_shuffle!(
5975                a,
5976                b,
5977                [
5978                    0,
5979                    1,
5980                    16 + LANE2 as u32,
5981                    3,
5982                    4,
5983                    5,
5984                    6,
5985                    7,
5986                    8,
5987                    9,
5988                    10,
5989                    11,
5990                    12,
5991                    13,
5992                    14,
5993                    15
5994                ]
5995            ),
5996            3 => simd_shuffle!(
5997                a,
5998                b,
5999                [
6000                    0,
6001                    1,
6002                    2,
6003                    16 + LANE2 as u32,
6004                    4,
6005                    5,
6006                    6,
6007                    7,
6008                    8,
6009                    9,
6010                    10,
6011                    11,
6012                    12,
6013                    13,
6014                    14,
6015                    15
6016                ]
6017            ),
6018            4 => simd_shuffle!(
6019                a,
6020                b,
6021                [
6022                    0,
6023                    1,
6024                    2,
6025                    3,
6026                    16 + LANE2 as u32,
6027                    5,
6028                    6,
6029                    7,
6030                    8,
6031                    9,
6032                    10,
6033                    11,
6034                    12,
6035                    13,
6036                    14,
6037                    15
6038                ]
6039            ),
6040            5 => simd_shuffle!(
6041                a,
6042                b,
6043                [
6044                    0,
6045                    1,
6046                    2,
6047                    3,
6048                    4,
6049                    16 + LANE2 as u32,
6050                    6,
6051                    7,
6052                    8,
6053                    9,
6054                    10,
6055                    11,
6056                    12,
6057                    13,
6058                    14,
6059                    15
6060                ]
6061            ),
6062            6 => simd_shuffle!(
6063                a,
6064                b,
6065                [
6066                    0,
6067                    1,
6068                    2,
6069                    3,
6070                    4,
6071                    5,
6072                    16 + LANE2 as u32,
6073                    7,
6074                    8,
6075                    9,
6076                    10,
6077                    11,
6078                    12,
6079                    13,
6080                    14,
6081                    15
6082                ]
6083            ),
6084            7 => simd_shuffle!(
6085                a,
6086                b,
6087                [
6088                    0,
6089                    1,
6090                    2,
6091                    3,
6092                    4,
6093                    5,
6094                    6,
6095                    16 + LANE2 as u32,
6096                    8,
6097                    9,
6098                    10,
6099                    11,
6100                    12,
6101                    13,
6102                    14,
6103                    15
6104                ]
6105            ),
6106            8 => simd_shuffle!(
6107                a,
6108                b,
6109                [
6110                    0,
6111                    1,
6112                    2,
6113                    3,
6114                    4,
6115                    5,
6116                    6,
6117                    7,
6118                    16 + LANE2 as u32,
6119                    9,
6120                    10,
6121                    11,
6122                    12,
6123                    13,
6124                    14,
6125                    15
6126                ]
6127            ),
6128            9 => simd_shuffle!(
6129                a,
6130                b,
6131                [
6132                    0,
6133                    1,
6134                    2,
6135                    3,
6136                    4,
6137                    5,
6138                    6,
6139                    7,
6140                    8,
6141                    16 + LANE2 as u32,
6142                    10,
6143                    11,
6144                    12,
6145                    13,
6146                    14,
6147                    15
6148                ]
6149            ),
6150            10 => simd_shuffle!(
6151                a,
6152                b,
6153                [
6154                    0,
6155                    1,
6156                    2,
6157                    3,
6158                    4,
6159                    5,
6160                    6,
6161                    7,
6162                    8,
6163                    9,
6164                    16 + LANE2 as u32,
6165                    11,
6166                    12,
6167                    13,
6168                    14,
6169                    15
6170                ]
6171            ),
6172            11 => simd_shuffle!(
6173                a,
6174                b,
6175                [
6176                    0,
6177                    1,
6178                    2,
6179                    3,
6180                    4,
6181                    5,
6182                    6,
6183                    7,
6184                    8,
6185                    9,
6186                    10,
6187                    16 + LANE2 as u32,
6188                    12,
6189                    13,
6190                    14,
6191                    15
6192                ]
6193            ),
6194            12 => simd_shuffle!(
6195                a,
6196                b,
6197                [
6198                    0,
6199                    1,
6200                    2,
6201                    3,
6202                    4,
6203                    5,
6204                    6,
6205                    7,
6206                    8,
6207                    9,
6208                    10,
6209                    11,
6210                    16 + LANE2 as u32,
6211                    13,
6212                    14,
6213                    15
6214                ]
6215            ),
6216            13 => simd_shuffle!(
6217                a,
6218                b,
6219                [
6220                    0,
6221                    1,
6222                    2,
6223                    3,
6224                    4,
6225                    5,
6226                    6,
6227                    7,
6228                    8,
6229                    9,
6230                    10,
6231                    11,
6232                    12,
6233                    16 + LANE2 as u32,
6234                    14,
6235                    15
6236                ]
6237            ),
6238            14 => simd_shuffle!(
6239                a,
6240                b,
6241                [
6242                    0,
6243                    1,
6244                    2,
6245                    3,
6246                    4,
6247                    5,
6248                    6,
6249                    7,
6250                    8,
6251                    9,
6252                    10,
6253                    11,
6254                    12,
6255                    13,
6256                    16 + LANE2 as u32,
6257                    15
6258                ]
6259            ),
6260            15 => simd_shuffle!(
6261                a,
6262                b,
6263                [
6264                    0,
6265                    1,
6266                    2,
6267                    3,
6268                    4,
6269                    5,
6270                    6,
6271                    7,
6272                    8,
6273                    9,
6274                    10,
6275                    11,
6276                    12,
6277                    13,
6278                    14,
6279                    16 + LANE2 as u32
6280                ]
6281            ),
6282            _ => unreachable_unchecked(),
6283        }
6284    }
6285}
6286#[doc = "Insert vector element from another vector element"]
6287#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"]
6288#[inline(always)]
6289#[target_feature(enable = "neon")]
6290#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6291#[rustc_legacy_const_generics(1, 3)]
6292#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6293pub fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(
6294    a: int16x8_t,
6295    b: int16x8_t,
6296) -> int16x8_t {
6297    static_assert_uimm_bits!(LANE1, 3);
6298    static_assert_uimm_bits!(LANE2, 3);
6299    unsafe {
6300        match LANE1 & 0b111 {
6301            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
6302            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
6303            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
6304            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
6305            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
6306            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
6307            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
6308            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
6309            _ => unreachable_unchecked(),
6310        }
6311    }
6312}
6313#[doc = "Insert vector element from another vector element"]
6314#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"]
6315#[inline(always)]
6316#[target_feature(enable = "neon")]
6317#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6318#[rustc_legacy_const_generics(1, 3)]
6319#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6320pub fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(
6321    a: int32x4_t,
6322    b: int32x4_t,
6323) -> int32x4_t {
6324    static_assert_uimm_bits!(LANE1, 2);
6325    static_assert_uimm_bits!(LANE2, 2);
6326    unsafe {
6327        match LANE1 & 0b11 {
6328            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
6329            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
6330            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
6331            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
6332            _ => unreachable_unchecked(),
6333        }
6334    }
6335}
6336#[doc = "Insert vector element from another vector element"]
6337#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"]
6338#[inline(always)]
6339#[target_feature(enable = "neon")]
6340#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6341#[rustc_legacy_const_generics(1, 3)]
6342#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6343pub fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(
6344    a: int64x2_t,
6345    b: int64x2_t,
6346) -> int64x2_t {
6347    static_assert_uimm_bits!(LANE1, 1);
6348    static_assert_uimm_bits!(LANE2, 1);
6349    unsafe {
6350        match LANE1 & 0b1 {
6351            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
6352            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
6353            _ => unreachable_unchecked(),
6354        }
6355    }
6356}
6357#[doc = "Insert vector element from another vector element"]
6358#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"]
6359#[inline(always)]
6360#[target_feature(enable = "neon")]
6361#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6362#[rustc_legacy_const_generics(1, 3)]
6363#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6364pub fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(
6365    a: uint8x16_t,
6366    b: uint8x16_t,
6367) -> uint8x16_t {
6368    static_assert_uimm_bits!(LANE1, 4);
6369    static_assert_uimm_bits!(LANE2, 4);
6370    unsafe {
6371        match LANE1 & 0b1111 {
6372            0 => simd_shuffle!(
6373                a,
6374                b,
6375                [
6376                    16 + LANE2 as u32,
6377                    1,
6378                    2,
6379                    3,
6380                    4,
6381                    5,
6382                    6,
6383                    7,
6384                    8,
6385                    9,
6386                    10,
6387                    11,
6388                    12,
6389                    13,
6390                    14,
6391                    15
6392                ]
6393            ),
6394            1 => simd_shuffle!(
6395                a,
6396                b,
6397                [
6398                    0,
6399                    16 + LANE2 as u32,
6400                    2,
6401                    3,
6402                    4,
6403                    5,
6404                    6,
6405                    7,
6406                    8,
6407                    9,
6408                    10,
6409                    11,
6410                    12,
6411                    13,
6412                    14,
6413                    15
6414                ]
6415            ),
6416            2 => simd_shuffle!(
6417                a,
6418                b,
6419                [
6420                    0,
6421                    1,
6422                    16 + LANE2 as u32,
6423                    3,
6424                    4,
6425                    5,
6426                    6,
6427                    7,
6428                    8,
6429                    9,
6430                    10,
6431                    11,
6432                    12,
6433                    13,
6434                    14,
6435                    15
6436                ]
6437            ),
6438            3 => simd_shuffle!(
6439                a,
6440                b,
6441                [
6442                    0,
6443                    1,
6444                    2,
6445                    16 + LANE2 as u32,
6446                    4,
6447                    5,
6448                    6,
6449                    7,
6450                    8,
6451                    9,
6452                    10,
6453                    11,
6454                    12,
6455                    13,
6456                    14,
6457                    15
6458                ]
6459            ),
6460            4 => simd_shuffle!(
6461                a,
6462                b,
6463                [
6464                    0,
6465                    1,
6466                    2,
6467                    3,
6468                    16 + LANE2 as u32,
6469                    5,
6470                    6,
6471                    7,
6472                    8,
6473                    9,
6474                    10,
6475                    11,
6476                    12,
6477                    13,
6478                    14,
6479                    15
6480                ]
6481            ),
6482            5 => simd_shuffle!(
6483                a,
6484                b,
6485                [
6486                    0,
6487                    1,
6488                    2,
6489                    3,
6490                    4,
6491                    16 + LANE2 as u32,
6492                    6,
6493                    7,
6494                    8,
6495                    9,
6496                    10,
6497                    11,
6498                    12,
6499                    13,
6500                    14,
6501                    15
6502                ]
6503            ),
6504            6 => simd_shuffle!(
6505                a,
6506                b,
6507                [
6508                    0,
6509                    1,
6510                    2,
6511                    3,
6512                    4,
6513                    5,
6514                    16 + LANE2 as u32,
6515                    7,
6516                    8,
6517                    9,
6518                    10,
6519                    11,
6520                    12,
6521                    13,
6522                    14,
6523                    15
6524                ]
6525            ),
6526            7 => simd_shuffle!(
6527                a,
6528                b,
6529                [
6530                    0,
6531                    1,
6532                    2,
6533                    3,
6534                    4,
6535                    5,
6536                    6,
6537                    16 + LANE2 as u32,
6538                    8,
6539                    9,
6540                    10,
6541                    11,
6542                    12,
6543                    13,
6544                    14,
6545                    15
6546                ]
6547            ),
6548            8 => simd_shuffle!(
6549                a,
6550                b,
6551                [
6552                    0,
6553                    1,
6554                    2,
6555                    3,
6556                    4,
6557                    5,
6558                    6,
6559                    7,
6560                    16 + LANE2 as u32,
6561                    9,
6562                    10,
6563                    11,
6564                    12,
6565                    13,
6566                    14,
6567                    15
6568                ]
6569            ),
6570            9 => simd_shuffle!(
6571                a,
6572                b,
6573                [
6574                    0,
6575                    1,
6576                    2,
6577                    3,
6578                    4,
6579                    5,
6580                    6,
6581                    7,
6582                    8,
6583                    16 + LANE2 as u32,
6584                    10,
6585                    11,
6586                    12,
6587                    13,
6588                    14,
6589                    15
6590                ]
6591            ),
6592            10 => simd_shuffle!(
6593                a,
6594                b,
6595                [
6596                    0,
6597                    1,
6598                    2,
6599                    3,
6600                    4,
6601                    5,
6602                    6,
6603                    7,
6604                    8,
6605                    9,
6606                    16 + LANE2 as u32,
6607                    11,
6608                    12,
6609                    13,
6610                    14,
6611                    15
6612                ]
6613            ),
6614            11 => simd_shuffle!(
6615                a,
6616                b,
6617                [
6618                    0,
6619                    1,
6620                    2,
6621                    3,
6622                    4,
6623                    5,
6624                    6,
6625                    7,
6626                    8,
6627                    9,
6628                    10,
6629                    16 + LANE2 as u32,
6630                    12,
6631                    13,
6632                    14,
6633                    15
6634                ]
6635            ),
6636            12 => simd_shuffle!(
6637                a,
6638                b,
6639                [
6640                    0,
6641                    1,
6642                    2,
6643                    3,
6644                    4,
6645                    5,
6646                    6,
6647                    7,
6648                    8,
6649                    9,
6650                    10,
6651                    11,
6652                    16 + LANE2 as u32,
6653                    13,
6654                    14,
6655                    15
6656                ]
6657            ),
6658            13 => simd_shuffle!(
6659                a,
6660                b,
6661                [
6662                    0,
6663                    1,
6664                    2,
6665                    3,
6666                    4,
6667                    5,
6668                    6,
6669                    7,
6670                    8,
6671                    9,
6672                    10,
6673                    11,
6674                    12,
6675                    16 + LANE2 as u32,
6676                    14,
6677                    15
6678                ]
6679            ),
6680            14 => simd_shuffle!(
6681                a,
6682                b,
6683                [
6684                    0,
6685                    1,
6686                    2,
6687                    3,
6688                    4,
6689                    5,
6690                    6,
6691                    7,
6692                    8,
6693                    9,
6694                    10,
6695                    11,
6696                    12,
6697                    13,
6698                    16 + LANE2 as u32,
6699                    15
6700                ]
6701            ),
6702            15 => simd_shuffle!(
6703                a,
6704                b,
6705                [
6706                    0,
6707                    1,
6708                    2,
6709                    3,
6710                    4,
6711                    5,
6712                    6,
6713                    7,
6714                    8,
6715                    9,
6716                    10,
6717                    11,
6718                    12,
6719                    13,
6720                    14,
6721                    16 + LANE2 as u32
6722                ]
6723            ),
6724            _ => unreachable_unchecked(),
6725        }
6726    }
6727}
6728#[doc = "Insert vector element from another vector element"]
6729#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"]
6730#[inline(always)]
6731#[target_feature(enable = "neon")]
6732#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6733#[rustc_legacy_const_generics(1, 3)]
6734#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6735pub fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(
6736    a: uint16x8_t,
6737    b: uint16x8_t,
6738) -> uint16x8_t {
6739    static_assert_uimm_bits!(LANE1, 3);
6740    static_assert_uimm_bits!(LANE2, 3);
6741    unsafe {
6742        match LANE1 & 0b111 {
6743            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
6744            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
6745            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
6746            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
6747            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
6748            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
6749            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
6750            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
6751            _ => unreachable_unchecked(),
6752        }
6753    }
6754}
6755#[doc = "Insert vector element from another vector element"]
6756#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"]
6757#[inline(always)]
6758#[target_feature(enable = "neon")]
6759#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6760#[rustc_legacy_const_generics(1, 3)]
6761#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6762pub fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(
6763    a: uint32x4_t,
6764    b: uint32x4_t,
6765) -> uint32x4_t {
6766    static_assert_uimm_bits!(LANE1, 2);
6767    static_assert_uimm_bits!(LANE2, 2);
6768    unsafe {
6769        match LANE1 & 0b11 {
6770            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
6771            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
6772            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
6773            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
6774            _ => unreachable_unchecked(),
6775        }
6776    }
6777}
6778#[doc = "Insert vector element from another vector element"]
6779#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"]
6780#[inline(always)]
6781#[target_feature(enable = "neon")]
6782#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6783#[rustc_legacy_const_generics(1, 3)]
6784#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6785pub fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(
6786    a: uint64x2_t,
6787    b: uint64x2_t,
6788) -> uint64x2_t {
6789    static_assert_uimm_bits!(LANE1, 1);
6790    static_assert_uimm_bits!(LANE2, 1);
6791    unsafe {
6792        match LANE1 & 0b1 {
6793            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
6794            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
6795            _ => unreachable_unchecked(),
6796        }
6797    }
6798}
6799#[doc = "Insert vector element from another vector element"]
6800#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"]
6801#[inline(always)]
6802#[target_feature(enable = "neon")]
6803#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6804#[rustc_legacy_const_generics(1, 3)]
6805#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6806pub fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(
6807    a: poly8x16_t,
6808    b: poly8x16_t,
6809) -> poly8x16_t {
6810    static_assert_uimm_bits!(LANE1, 4);
6811    static_assert_uimm_bits!(LANE2, 4);
6812    unsafe {
6813        match LANE1 & 0b1111 {
6814            0 => simd_shuffle!(
6815                a,
6816                b,
6817                [
6818                    16 + LANE2 as u32,
6819                    1,
6820                    2,
6821                    3,
6822                    4,
6823                    5,
6824                    6,
6825                    7,
6826                    8,
6827                    9,
6828                    10,
6829                    11,
6830                    12,
6831                    13,
6832                    14,
6833                    15
6834                ]
6835            ),
6836            1 => simd_shuffle!(
6837                a,
6838                b,
6839                [
6840                    0,
6841                    16 + LANE2 as u32,
6842                    2,
6843                    3,
6844                    4,
6845                    5,
6846                    6,
6847                    7,
6848                    8,
6849                    9,
6850                    10,
6851                    11,
6852                    12,
6853                    13,
6854                    14,
6855                    15
6856                ]
6857            ),
6858            2 => simd_shuffle!(
6859                a,
6860                b,
6861                [
6862                    0,
6863                    1,
6864                    16 + LANE2 as u32,
6865                    3,
6866                    4,
6867                    5,
6868                    6,
6869                    7,
6870                    8,
6871                    9,
6872                    10,
6873                    11,
6874                    12,
6875                    13,
6876                    14,
6877                    15
6878                ]
6879            ),
6880            3 => simd_shuffle!(
6881                a,
6882                b,
6883                [
6884                    0,
6885                    1,
6886                    2,
6887                    16 + LANE2 as u32,
6888                    4,
6889                    5,
6890                    6,
6891                    7,
6892                    8,
6893                    9,
6894                    10,
6895                    11,
6896                    12,
6897                    13,
6898                    14,
6899                    15
6900                ]
6901            ),
6902            4 => simd_shuffle!(
6903                a,
6904                b,
6905                [
6906                    0,
6907                    1,
6908                    2,
6909                    3,
6910                    16 + LANE2 as u32,
6911                    5,
6912                    6,
6913                    7,
6914                    8,
6915                    9,
6916                    10,
6917                    11,
6918                    12,
6919                    13,
6920                    14,
6921                    15
6922                ]
6923            ),
6924            5 => simd_shuffle!(
6925                a,
6926                b,
6927                [
6928                    0,
6929                    1,
6930                    2,
6931                    3,
6932                    4,
6933                    16 + LANE2 as u32,
6934                    6,
6935                    7,
6936                    8,
6937                    9,
6938                    10,
6939                    11,
6940                    12,
6941                    13,
6942                    14,
6943                    15
6944                ]
6945            ),
6946            6 => simd_shuffle!(
6947                a,
6948                b,
6949                [
6950                    0,
6951                    1,
6952                    2,
6953                    3,
6954                    4,
6955                    5,
6956                    16 + LANE2 as u32,
6957                    7,
6958                    8,
6959                    9,
6960                    10,
6961                    11,
6962                    12,
6963                    13,
6964                    14,
6965                    15
6966                ]
6967            ),
6968            7 => simd_shuffle!(
6969                a,
6970                b,
6971                [
6972                    0,
6973                    1,
6974                    2,
6975                    3,
6976                    4,
6977                    5,
6978                    6,
6979                    16 + LANE2 as u32,
6980                    8,
6981                    9,
6982                    10,
6983                    11,
6984                    12,
6985                    13,
6986                    14,
6987                    15
6988                ]
6989            ),
6990            8 => simd_shuffle!(
6991                a,
6992                b,
6993                [
6994                    0,
6995                    1,
6996                    2,
6997                    3,
6998                    4,
6999                    5,
7000                    6,
7001                    7,
7002                    16 + LANE2 as u32,
7003                    9,
7004                    10,
7005                    11,
7006                    12,
7007                    13,
7008                    14,
7009                    15
7010                ]
7011            ),
7012            9 => simd_shuffle!(
7013                a,
7014                b,
7015                [
7016                    0,
7017                    1,
7018                    2,
7019                    3,
7020                    4,
7021                    5,
7022                    6,
7023                    7,
7024                    8,
7025                    16 + LANE2 as u32,
7026                    10,
7027                    11,
7028                    12,
7029                    13,
7030                    14,
7031                    15
7032                ]
7033            ),
7034            10 => simd_shuffle!(
7035                a,
7036                b,
7037                [
7038                    0,
7039                    1,
7040                    2,
7041                    3,
7042                    4,
7043                    5,
7044                    6,
7045                    7,
7046                    8,
7047                    9,
7048                    16 + LANE2 as u32,
7049                    11,
7050                    12,
7051                    13,
7052                    14,
7053                    15
7054                ]
7055            ),
7056            11 => simd_shuffle!(
7057                a,
7058                b,
7059                [
7060                    0,
7061                    1,
7062                    2,
7063                    3,
7064                    4,
7065                    5,
7066                    6,
7067                    7,
7068                    8,
7069                    9,
7070                    10,
7071                    16 + LANE2 as u32,
7072                    12,
7073                    13,
7074                    14,
7075                    15
7076                ]
7077            ),
7078            12 => simd_shuffle!(
7079                a,
7080                b,
7081                [
7082                    0,
7083                    1,
7084                    2,
7085                    3,
7086                    4,
7087                    5,
7088                    6,
7089                    7,
7090                    8,
7091                    9,
7092                    10,
7093                    11,
7094                    16 + LANE2 as u32,
7095                    13,
7096                    14,
7097                    15
7098                ]
7099            ),
7100            13 => simd_shuffle!(
7101                a,
7102                b,
7103                [
7104                    0,
7105                    1,
7106                    2,
7107                    3,
7108                    4,
7109                    5,
7110                    6,
7111                    7,
7112                    8,
7113                    9,
7114                    10,
7115                    11,
7116                    12,
7117                    16 + LANE2 as u32,
7118                    14,
7119                    15
7120                ]
7121            ),
7122            14 => simd_shuffle!(
7123                a,
7124                b,
7125                [
7126                    0,
7127                    1,
7128                    2,
7129                    3,
7130                    4,
7131                    5,
7132                    6,
7133                    7,
7134                    8,
7135                    9,
7136                    10,
7137                    11,
7138                    12,
7139                    13,
7140                    16 + LANE2 as u32,
7141                    15
7142                ]
7143            ),
7144            15 => simd_shuffle!(
7145                a,
7146                b,
7147                [
7148                    0,
7149                    1,
7150                    2,
7151                    3,
7152                    4,
7153                    5,
7154                    6,
7155                    7,
7156                    8,
7157                    9,
7158                    10,
7159                    11,
7160                    12,
7161                    13,
7162                    14,
7163                    16 + LANE2 as u32
7164                ]
7165            ),
7166            _ => unreachable_unchecked(),
7167        }
7168    }
7169}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x8_t,
) -> poly16x8_t {
    // Both lane indices must fit in 3 bits (0..=7); enforced at compile time.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    // In the shuffle index vector, 0..=7 select lanes of `a` and 8..=15 select
    // lanes of `b`, so `8 + LANE2` copies lane LANE2 of `b` into lane LANE1 of `a`.
    // SAFETY: `LANE1 & 0b111` is always in 0..=7, so every value is covered by
    // a match arm and `unreachable_unchecked` can never be reached.
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x2_t,
) -> poly64x2_t {
    // Both lane indices must fit in 1 bit (0..=1); enforced at compile time.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    // Shuffle indices 0..=1 select lanes of `a`, 2..=3 select lanes of `b`,
    // so `2 + LANE2` copies lane LANE2 of `b` into lane LANE1 of `a`.
    // SAFETY: `LANE1 & 0b1` is always 0 or 1, so both values are covered by
    // match arms and `unreachable_unchecked` can never be reached.
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcreate_f64(a: u64) -> float64x1_t {
    // SAFETY: `u64` and `float64x1_t` are both exactly 64 bits wide, so this
    // bit-level reinterpretation is a valid transmute (no instruction emitted,
    // hence the `nop` assertion above).
    unsafe { transmute(a) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t {
    // SAFETY: lane-wise narrowing cast between vectors with the same lane
    // count (2 x f64 -> 2 x f32); lowered to FCVTN per the assertion above.
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t {
    // SAFETY: lane-wise widening cast between vectors with the same lane
    // count (2 x f32 -> 2 x f64); lowered to FCVTL per the assertion above.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t {
    // SAFETY: lane-wise signed-int-to-float cast between vectors with the
    // same lane count; lowered to SCVTF per the assertion above.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t {
    // SAFETY: lane-wise signed-int-to-float cast between vectors with the
    // same lane count; lowered to SCVTF per the assertion above.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t {
    // SAFETY: lane-wise unsigned-int-to-float cast between vectors with the
    // same lane count; lowered to UCVTF per the assertion above.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // SAFETY: lane-wise unsigned-int-to-float cast between vectors with the
    // same lane count; lowered to UCVTF per the assertion above.
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to lower precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f16_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t {
    // Narrows `b` to four f16 lanes and appends them after `a`, yielding an
    // 8-lane result (a in the low half, converted b in the high half).
    vcombine_f16(a, vcvt_f16_f32(b))
}
#[doc = "Floating-point convert to higher precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvt_high_f32_f16(a: float16x8_t) -> float32x4_t {
    // Widens only the upper four f16 lanes of `a` to f32 (FCVTL2 semantics).
    vcvt_f32_f16(vget_high_f16(a))
}
#[doc = "Floating-point convert to lower precision narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    // SAFETY: `simd_cast` narrows each f64 lane of `b` to f32; the shuffle
    // then concatenates `a` (result lanes 0-1) with the narrowed `b`
    // (result lanes 2-3) — all indices are in bounds for the two 2-lane inputs.
    unsafe { simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t {
    // SAFETY: the shuffle extracts the upper two lanes (indices 2 and 3,
    // in bounds for a 4-lane vector); `simd_cast` then widens them lane-wise
    // to f64 (FCVTL2 semantics).
    unsafe {
        let b: float32x2_t = simd_shuffle!(a, a, [2, 3]);
        simd_cast(b)
    }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
    // N is the number of fractional bits of the fixed-point input, per the
    // Arm intrinsic specification; restricted to 1..=64 at compile time.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the backing LLVM intrinsic for this instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_s64(a: int64x1_t, n: i32) -> float64x1_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); N was validated
    // above and the CPU feature is enabled via `#[target_feature]`.
    unsafe { _vcvt_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
    // N is the number of fractional bits of the fixed-point input, per the
    // Arm intrinsic specification; restricted to 1..=64 at compile time.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the backing LLVM intrinsic for this instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); N was validated
    // above and the CPU feature is enabled via `#[target_feature]`.
    unsafe { _vcvtq_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
    // N is the number of fractional bits of the fixed-point input, per the
    // Arm intrinsic specification; restricted to 1..=64 at compile time.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the backing LLVM intrinsic for this instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_u64(a: uint64x1_t, n: i32) -> float64x1_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); N was validated
    // above and the CPU feature is enabled via `#[target_feature]`.
    unsafe { _vcvt_n_f64_u64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
    // N is the number of fractional bits of the fixed-point input, per the
    // Arm intrinsic specification; restricted to 1..=64 at compile time.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the backing LLVM intrinsic for this instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_u64(a: uint64x2_t, n: i32) -> float64x2_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); N was validated
    // above and the CPU feature is enabled via `#[target_feature]`.
    unsafe { _vcvtq_n_f64_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
    // N is the number of fractional bits of the fixed-point result, per the
    // Arm intrinsic specification; restricted to 1..=64 at compile time.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the backing LLVM intrinsic for this instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64"
        )]
        fn _vcvt_n_s64_f64(a: float64x1_t, n: i32) -> int64x1_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); N was validated
    // above and the CPU feature is enabled via `#[target_feature]`.
    unsafe { _vcvt_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
    // N is the number of fractional bits of the fixed-point result, per the
    // Arm intrinsic specification; restricted to 1..=64 at compile time.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the backing LLVM intrinsic for this instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64"
        )]
        fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); N was validated
    // above and the CPU feature is enabled via `#[target_feature]`.
    unsafe { _vcvtq_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
    // N is the number of fractional bits of the fixed-point result, per the
    // Arm intrinsic specification; restricted to 1..=64 at compile time.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the backing LLVM intrinsic for this instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64"
        )]
        fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> uint64x1_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); N was validated
    // above and the CPU feature is enabled via `#[target_feature]`.
    unsafe { _vcvt_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
    // N is the number of fractional bits of the fixed-point result, per the
    // Arm intrinsic specification; restricted to 1..=64 at compile time.
    static_assert!(N >= 1 && N <= 64);
    // Declaration of the backing LLVM intrinsic for this instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64"
        )]
        fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> uint64x2_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); N was validated
    // above and the CPU feature is enabled via `#[target_feature]`.
    unsafe { _vcvtq_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
    // Declaration of LLVM's generic saturating float-to-signed-int intrinsic,
    // which matches FCVTZS's round-toward-zero, saturating semantics.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v1i64.v1f64"
        )]
        fn _vcvt_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the CPU feature
    // is enabled via `#[target_feature]`.
    unsafe { _vcvt_s64_f64(a) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Declaration of LLVM's generic saturating float-to-signed-int intrinsic,
    // which matches FCVTZS's round-toward-zero, saturating semantics.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v2i64.v2f64"
        )]
        fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the CPU feature
    // is enabled via `#[target_feature]`.
    unsafe { _vcvtq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Declaration of LLVM's generic saturating float-to-unsigned-int
    // intrinsic, matching FCVTZU's round-toward-zero, saturating semantics.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v1i64.v1f64"
        )]
        fn _vcvt_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the CPU feature
    // is enabled via `#[target_feature]`.
    unsafe { _vcvt_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Declaration of LLVM's generic saturating float-to-unsigned-int
    // intrinsic, matching FCVTZU's round-toward-zero, saturating semantics.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v2i64.v2f64"
        )]
        fn _vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the CPU feature
    // is enabled via `#[target_feature]`.
    unsafe { _vcvtq_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t {
    // Declaration of the backing LLVM intrinsic for FCVTAS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i16.v4f16"
        )]
        fn _vcvta_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the required CPU
    // features are enabled via `#[target_feature]`.
    unsafe { _vcvta_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t {
    // Declaration of the backing LLVM intrinsic for FCVTAS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v8i16.v8f16"
        )]
        fn _vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the required CPU
    // features are enabled via `#[target_feature]`.
    unsafe { _vcvtaq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
    // Declaration of the backing LLVM intrinsic for FCVTAS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32"
        )]
        fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the CPU feature
    // is enabled via `#[target_feature]`.
    unsafe { _vcvta_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
    // Declaration of the backing LLVM intrinsic for FCVTAS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32"
        )]
        fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the CPU feature
    // is enabled via `#[target_feature]`.
    unsafe { _vcvtaq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
    // Declaration of the backing LLVM intrinsic for FCVTAS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64"
        )]
        fn _vcvta_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the CPU feature
    // is enabled via `#[target_feature]`.
    unsafe { _vcvta_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Declaration of the backing LLVM intrinsic for FCVTAS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64"
        )]
        fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the CPU feature
    // is enabled via `#[target_feature]`.
    unsafe { _vcvtaq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Declaration of the backing LLVM intrinsic for FCVTAU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i16.v4f16"
        )]
        fn _vcvta_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the required CPU
    // features are enabled via `#[target_feature]`.
    unsafe { _vcvta_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // Declaration of the backing LLVM intrinsic for FCVTAU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v8i16.v8f16"
        )]
        fn _vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the required CPU
    // features are enabled via `#[target_feature]`.
    unsafe { _vcvtaq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
    // Declaration of the backing LLVM intrinsic for FCVTAU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32"
        )]
        fn _vcvta_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the CPU feature
    // is enabled via `#[target_feature]`.
    unsafe { _vcvta_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // Declaration of the backing LLVM intrinsic for FCVTAU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32"
        )]
        fn _vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the CPU feature
    // is enabled via `#[target_feature]`.
    unsafe { _vcvtaq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Declaration of the backing LLVM intrinsic for FCVTAU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64"
        )]
        fn _vcvta_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the CPU feature
    // is enabled via `#[target_feature]`.
    unsafe { _vcvta_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Declaration of the backing LLVM intrinsic for FCVTAU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64"
        )]
        fn _vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the CPU feature
    // is enabled via `#[target_feature]`.
    unsafe { _vcvtaq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s16_f16(a: f16) -> i16 {
    // Implemented via the 32-bit scalar variant, then narrowed.
    // NOTE(review): the `as i16` cast wraps for results outside the i16 range;
    // presumably intended by the generator — confirm against the ACLE spec
    // for vcvtah_s16_f16's saturation behavior.
    vcvtah_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s32_f16(a: f16) -> i32 {
    // Declaration of the backing LLVM intrinsic for scalar FCVTAS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f16"
        )]
        fn _vcvtah_s32_f16(a: f16) -> i32;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the required CPU
    // features are enabled via `#[target_feature]`.
    unsafe { _vcvtah_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s64_f16(a: f16) -> i64 {
    // Declaration of the backing LLVM intrinsic for scalar FCVTAS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f16"
        )]
        fn _vcvtah_s64_f16(a: f16) -> i64;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the required CPU
    // features are enabled via `#[target_feature]`.
    unsafe { _vcvtah_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u16_f16(a: f16) -> u16 {
    // Implemented via the 32-bit scalar variant, then narrowed.
    // NOTE(review): the `as u16` cast wraps for results outside the u16 range;
    // presumably intended by the generator — confirm against the ACLE spec
    // for vcvtah_u16_f16's saturation behavior.
    vcvtah_u32_f16(a) as u16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u32_f16(a: f16) -> u32 {
    // Declaration of the backing LLVM intrinsic for scalar FCVTAU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f16"
        )]
        fn _vcvtah_u32_f16(a: f16) -> u32;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the required CPU
    // features are enabled via `#[target_feature]`.
    unsafe { _vcvtah_u32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u64_f16(a: f16) -> u64 {
    // Declaration of the backing LLVM intrinsic for scalar FCVTAU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f16"
        )]
        fn _vcvtah_u64_f16(a: f16) -> u64;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the required CPU
    // features are enabled via `#[target_feature]`.
    unsafe { _vcvtah_u64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_s32_f32(a: f32) -> i32 {
    // Declaration of the backing LLVM intrinsic for scalar FCVTAS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f32"
        )]
        fn _vcvtas_s32_f32(a: f32) -> i32;
    }
    // SAFETY: pure value-to-value conversion (no pointers); the CPU feature
    // is enabled via `#[target_feature]`.
    unsafe { _vcvtas_s32_f32(a) }
}
7830#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7831#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"]
7832#[inline(always)]
7833#[target_feature(enable = "neon")]
7834#[cfg_attr(test, assert_instr(fcvtas))]
7835#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7836pub fn vcvtad_s64_f64(a: f64) -> i64 {
7837    unsafe extern "unadjusted" {
7838        #[cfg_attr(
7839            any(target_arch = "aarch64", target_arch = "arm64ec"),
7840            link_name = "llvm.aarch64.neon.fcvtas.i64.f64"
7841        )]
7842        fn _vcvtad_s64_f64(a: f64) -> i64;
7843    }
7844    unsafe { _vcvtad_s64_f64(a) }
7845}
7846#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7847#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"]
7848#[inline(always)]
7849#[target_feature(enable = "neon")]
7850#[cfg_attr(test, assert_instr(fcvtau))]
7851#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7852pub fn vcvtas_u32_f32(a: f32) -> u32 {
7853    unsafe extern "unadjusted" {
7854        #[cfg_attr(
7855            any(target_arch = "aarch64", target_arch = "arm64ec"),
7856            link_name = "llvm.aarch64.neon.fcvtau.i32.f32"
7857        )]
7858        fn _vcvtas_u32_f32(a: f32) -> u32;
7859    }
7860    unsafe { _vcvtas_u32_f32(a) }
7861}
7862#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
7863#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"]
7864#[inline(always)]
7865#[target_feature(enable = "neon")]
7866#[cfg_attr(test, assert_instr(fcvtau))]
7867#[stable(feature = "neon_intrinsics", since = "1.59.0")]
7868pub fn vcvtad_u64_f64(a: f64) -> u64 {
7869    unsafe extern "unadjusted" {
7870        #[cfg_attr(
7871            any(target_arch = "aarch64", target_arch = "arm64ec"),
7872            link_name = "llvm.aarch64.neon.fcvtau.i64.f64"
7873        )]
7874        fn _vcvtad_u64_f64(a: f64) -> u64;
7875    }
7876    unsafe { _vcvtad_u64_f64(a) }
7877}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_s64(a: i64) -> f64 {
    // A plain `as` cast is sufficient here; the assert_instr attribute above
    // checks it compiles down to a single SCVTF instruction.
    a as f64
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_s32(a: i32) -> f32 {
    // Plain `as` cast; checked to lower to SCVTF by assert_instr above.
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s16(a: i16) -> f16 {
    // Signed int -> half-precision via a plain `as` cast; checked to lower
    // to SCVTF by the assert_instr attribute above.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s32(a: i32) -> f16 {
    // i32 -> f16 cast; lowers to SCVTF (see assert_instr above).
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s64(a: i64) -> f16 {
    // i64 -> f16 cast; lowers to SCVTF (see assert_instr above).
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u16(a: u16) -> f16 {
    // Unsigned int -> half-precision via a plain `as` cast; checked to lower
    // to UCVTF by the assert_instr attribute above.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u32(a: u32) -> f16 {
    // u32 -> f16 cast; lowers to UCVTF (see assert_instr above).
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u64(a: u64) -> f16 {
    // u64 -> f16 cast; lowers to UCVTF (see assert_instr above).
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s16<const N: i32>(a: i16) -> f16 {
    // N is the number of fixed-point fraction bits; the architecture limits
    // it to 1..=16 for a 16-bit source, enforced at compile time.
    static_assert!(N >= 1 && N <= 16);
    // No i16-source LLVM intrinsic exists, so widen to i32 and reuse the
    // 32-bit wrapper; sign-extension does not change the fixed-point value.
    vcvth_n_f16_s32::<N>(a as i32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s32<const N: i32>(a: i32) -> f16 {
    // Compile-time range check for the fraction-bit count.
    static_assert!(N >= 1 && N <= 16);
    // LLVM's fixed-point signed-to-float conversion intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i32"
        )]
        fn _vcvth_n_f16_s32(a: i32, n: i32) -> f16;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects; N has
    // been validated by static_assert and the target feature gate holds.
    unsafe { _vcvth_n_f16_s32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s64<const N: i32>(a: i64) -> f16 {
    // Compile-time range check for the fraction-bit count.
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i64"
        )]
        fn _vcvth_n_f16_s64(a: i64, n: i32) -> f16;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects; N has
    // been validated by static_assert and the target feature gate holds.
    unsafe { _vcvth_n_f16_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u16<const N: i32>(a: u16) -> f16 {
    // Compile-time range check for the fraction-bit count.
    static_assert!(N >= 1 && N <= 16);
    // No u16-source LLVM intrinsic exists, so zero-extend to u32 and reuse
    // the 32-bit wrapper; zero-extension does not change the value.
    vcvth_n_f16_u32::<N>(a as u32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u32<const N: i32>(a: u32) -> f16 {
    // Compile-time range check for the fraction-bit count.
    static_assert!(N >= 1 && N <= 16);
    // LLVM's fixed-point unsigned-to-float conversion intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i32"
        )]
        fn _vcvth_n_f16_u32(a: u32, n: i32) -> f16;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects; N has
    // been validated by static_assert and the target feature gate holds.
    unsafe { _vcvth_n_f16_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u64<const N: i32>(a: u64) -> f16 {
    // Compile-time range check for the fraction-bit count.
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i64"
        )]
        fn _vcvth_n_f16_u64(a: u64, n: i32) -> f16;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects; N has
    // been validated by static_assert and the target feature gate holds.
    unsafe { _vcvth_n_f16_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s16_f16<const N: i32>(a: f16) -> i16 {
    // N is the number of fixed-point fraction bits; limited to 1..=16 for a
    // 16-bit result, enforced at compile time.
    static_assert!(N >= 1 && N <= 16);
    // No i16-result LLVM intrinsic exists: convert via the 32-bit wrapper
    // and truncate to 16 bits.
    vcvth_n_s32_f16::<N>(a) as i16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s32_f16<const N: i32>(a: f16) -> i32 {
    // Compile-time range check for the fraction-bit count.
    static_assert!(N >= 1 && N <= 16);
    // LLVM's float-to-signed-fixed-point conversion intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f16"
        )]
        fn _vcvth_n_s32_f16(a: f16, n: i32) -> i32;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects; N has
    // been validated by static_assert and the target feature gate holds.
    unsafe { _vcvth_n_s32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s64_f16<const N: i32>(a: f16) -> i64 {
    // Compile-time range check for the fraction-bit count.
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f16"
        )]
        fn _vcvth_n_s64_f16(a: f16, n: i32) -> i64;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects; N has
    // been validated by static_assert and the target feature gate holds.
    unsafe { _vcvth_n_s64_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u16_f16<const N: i32>(a: f16) -> u16 {
    // Compile-time range check for the fraction-bit count.
    static_assert!(N >= 1 && N <= 16);
    // No u16-result LLVM intrinsic exists: convert via the 32-bit wrapper
    // and truncate to 16 bits.
    vcvth_n_u32_f16::<N>(a) as u16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u32_f16<const N: i32>(a: f16) -> u32 {
    // Compile-time range check for the fraction-bit count.
    static_assert!(N >= 1 && N <= 16);
    // LLVM's float-to-unsigned-fixed-point conversion intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f16"
        )]
        fn _vcvth_n_u32_f16(a: f16, n: i32) -> u32;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects; N has
    // been validated by static_assert and the target feature gate holds.
    unsafe { _vcvth_n_u32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u64_f16<const N: i32>(a: f16) -> u64 {
    // Compile-time range check for the fraction-bit count.
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f16"
        )]
        fn _vcvth_n_u64_f16(a: f16, n: i32) -> u64;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects; N has
    // been validated by static_assert and the target feature gate holds.
    unsafe { _vcvth_n_u64_f16(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s16_f16(a: f16) -> i16 {
    // Rust float-to-int `as` casts round toward zero and saturate, matching
    // the FCVTZS instruction this is checked to compile to (assert_instr).
    a as i16
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s32_f16(a: f16) -> i32 {
    // Saturating, truncating cast; lowers to FCVTZS (see assert_instr above).
    a as i32
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s64_f16(a: f16) -> i64 {
    // Saturating, truncating cast; lowers to FCVTZS (see assert_instr above).
    a as i64
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u16_f16(a: f16) -> u16 {
    // Unsigned counterpart; saturating, truncating cast lowering to FCVTZU.
    a as u16
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u32_f16(a: f16) -> u32 {
    // Saturating, truncating cast; lowers to FCVTZU (see assert_instr above).
    a as u32
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u64_f16(a: f16) -> u64 {
    // Saturating, truncating cast; lowers to FCVTZU (see assert_instr above).
    a as u64
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t {
    // Vector (4 x f16 -> 4 x i16) form of FCVTMS via the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i16.v4f16"
        )]
        fn _vcvtm_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects;
    // instruction availability is guaranteed by the #[target_feature] gate above.
    unsafe { _vcvtm_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t {
    // 128-bit (8-lane) variant of vcvtm_s16_f16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v8i16.v8f16"
        )]
        fn _vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects;
    // instruction availability is guaranteed by the #[target_feature] gate above.
    unsafe { _vcvtmq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
    // 2 x f32 -> 2 x i32 form of FCVTMS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32"
        )]
        fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects;
    // instruction availability is guaranteed by the #[target_feature] gate above.
    unsafe { _vcvtm_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
    // 128-bit (4-lane) variant of vcvtm_s32_f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32"
        )]
        fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects;
    // instruction availability is guaranteed by the #[target_feature] gate above.
    unsafe { _vcvtmq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
    // Single-lane 64-bit form of FCVTMS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64"
        )]
        fn _vcvtm_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects;
    // instruction availability is guaranteed by the #[target_feature] gate above.
    unsafe { _vcvtm_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
    // 128-bit (2-lane) variant of vcvtm_s64_f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64"
        )]
        fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects;
    // instruction availability is guaranteed by the #[target_feature] gate above.
    unsafe { _vcvtmq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Vector (4 x f16 -> 4 x u16) form of FCVTMU via the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i16.v4f16"
        )]
        fn _vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects;
    // instruction availability is guaranteed by the #[target_feature] gate above.
    unsafe { _vcvtm_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // 128-bit (8-lane) variant of vcvtm_u16_f16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v8i16.v8f16"
        )]
        fn _vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects;
    // instruction availability is guaranteed by the #[target_feature] gate above.
    unsafe { _vcvtmq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
    // 2 x f32 -> 2 x u32 form of FCVTMU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32"
        )]
        fn _vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects;
    // instruction availability is guaranteed by the #[target_feature] gate above.
    unsafe { _vcvtm_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // 128-bit (4-lane) variant of vcvtm_u32_f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32"
        )]
        fn _vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects;
    // instruction availability is guaranteed by the #[target_feature] gate above.
    unsafe { _vcvtmq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane 64-bit form of FCVTMU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64"
        )]
        fn _vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects;
    // instruction availability is guaranteed by the #[target_feature] gate above.
    unsafe { _vcvtm_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // 128-bit (2-lane) variant of vcvtm_u64_f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64"
        )]
        fn _vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects;
    // instruction availability is guaranteed by the #[target_feature] gate above.
    unsafe { _vcvtmq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s16_f16(a: f16) -> i16 {
    // No i16-result LLVM intrinsic exists: convert via the 32-bit wrapper
    // and truncate to 16 bits.
    vcvtmh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s32_f16(a: f16) -> i32 {
    // Scalar f16 -> i32 form of FCVTMS via the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f16"
        )]
        fn _vcvtmh_s32_f16(a: f16) -> i32;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects;
    // instruction availability is guaranteed by the #[target_feature] gate above.
    unsafe { _vcvtmh_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s64_f16(a: f16) -> i64 {
    // Scalar f16 -> i64 form of FCVTMS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f16"
        )]
        fn _vcvtmh_s64_f16(a: f16) -> i64;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects;
    // instruction availability is guaranteed by the #[target_feature] gate above.
    unsafe { _vcvtmh_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u16_f16(a: f16) -> u16 {
    // No u16-result LLVM intrinsic exists: convert via the 32-bit wrapper
    // and truncate to 16 bits.
    vcvtmh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u32_f16(a: f16) -> u32 {
    // Scalar f16 -> u32 form of FCVTMU via the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f16"
        )]
        fn _vcvtmh_u32_f16(a: f16) -> u32;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects;
    // instruction availability is guaranteed by the #[target_feature] gate above.
    unsafe { _vcvtmh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u64_f16(a: f16) -> u64 {
    // Scalar f16 -> u64 form of FCVTMU.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f16"
        )]
        fn _vcvtmh_u64_f16(a: f16) -> u64;
    }
    // SAFETY: pure value-in/value-out intrinsic with no memory effects;
    // instruction availability is guaranteed by the #[target_feature] gate above.
    unsafe { _vcvtmh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f32"
        )]
        fn _vcvtms_s32_f32(a: f32) -> i32;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtms_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_s64_f64(a: f64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f64"
        )]
        fn _vcvtmd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtmd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_u32_f32(a: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f32"
        )]
        fn _vcvtms_u32_f32(a: f32) -> u32;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtms_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmd_u64_f64(a: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f64"
        )]
        fn _vcvtmd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtmd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i16.v4f16"
        )]
        fn _vcvtn_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtn_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v8i16.v8f16"
        )]
        fn _vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtnq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32"
        )]
        fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtn_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32"
        )]
        fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtnq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64"
        )]
        fn _vcvtn_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtn_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64"
        )]
        fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtnq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i16.v4f16"
        )]
        fn _vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtn_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v8i16.v8f16"
        )]
        fn _vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtnq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32"
        )]
        fn _vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtn_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32"
        )]
        fn _vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtnq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64"
        )]
        fn _vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtn_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64"
        )]
        fn _vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtnq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s16_f16(a: f16) -> i16 {
    // Convert via the 32-bit scalar intrinsic; the `as` cast keeps only the
    // low 16 bits of the i32 result.
    vcvtnh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s32_f16(a: f16) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f16"
        )]
        fn _vcvtnh_s32_f16(a: f16) -> i32;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtnh_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s64_f16(a: f16) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f16"
        )]
        fn _vcvtnh_s64_f16(a: f16) -> i64;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtnh_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u16_f16(a: f16) -> u16 {
    // Convert via the 32-bit scalar intrinsic; the `as` cast keeps only the
    // low 16 bits of the u32 result.
    vcvtnh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u32_f16(a: f16) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f16"
        )]
        fn _vcvtnh_u32_f16(a: f16) -> u32;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtnh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u64_f16(a: f16) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f16"
        )]
        fn _vcvtnh_u64_f16(a: f16) -> u64;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtnh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f32"
        )]
        fn _vcvtns_s32_f32(a: f32) -> i32;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtns_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_s64_f64(a: f64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f64"
        )]
        fn _vcvtnd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtnd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_u32_f32(a: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f32"
        )]
        fn _vcvtns_u32_f32(a: f32) -> u32;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtns_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_u64_f64(a: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f64"
        )]
        fn _vcvtnd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtnd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i16.v4f16"
        )]
        fn _vcvtp_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtp_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v8i16.v8f16"
        )]
        fn _vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtpq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32"
        )]
        fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtp_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32"
        )]
        fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtpq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64"
        )]
        fn _vcvtp_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtp_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64"
        )]
        fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtpq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i16.v4f16"
        )]
        fn _vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtp_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v8i16.v8f16"
        )]
        fn _vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtpq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32"
        )]
        fn _vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtp_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32"
        )]
        fn _vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtpq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64"
        )]
        fn _vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtp_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64"
        )]
        fn _vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtpq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s16_f16(a: f16) -> i16 {
    // Convert via the 32-bit scalar intrinsic; the `as` cast keeps only the
    // low 16 bits of the i32 result.
    vcvtph_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s32_f16(a: f16) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f16"
        )]
        fn _vcvtph_s32_f16(a: f16) -> i32;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtph_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s64_f16(a: f16) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f16"
        )]
        fn _vcvtph_s64_f16(a: f16) -> i64;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtph_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u16_f16(a: f16) -> u16 {
    // Convert via the 32-bit scalar intrinsic; the `as` cast keeps only the
    // low 16 bits of the u32 result.
    vcvtph_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u32_f16(a: f16) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f16"
        )]
        fn _vcvtph_u32_f16(a: f16) -> u32;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtph_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u64_f16(a: f16) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f16"
        )]
        fn _vcvtph_u64_f16(a: f16) -> u64;
    }
    // SAFETY: the intrinsic only reads its by-value argument and returns a value;
    // the required target feature is enabled on this function via #[target_feature].
    unsafe { _vcvtph_u64_f16(a) }
}
9196#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
9197#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"]
9198#[inline(always)]
9199#[target_feature(enable = "neon")]
9200#[cfg_attr(test, assert_instr(fcvtps))]
9201#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9202pub fn vcvtps_s32_f32(a: f32) -> i32 {
9203    unsafe extern "unadjusted" {
9204        #[cfg_attr(
9205            any(target_arch = "aarch64", target_arch = "arm64ec"),
9206            link_name = "llvm.aarch64.neon.fcvtps.i32.f32"
9207        )]
9208        fn _vcvtps_s32_f32(a: f32) -> i32;
9209    }
9210    unsafe { _vcvtps_s32_f32(a) }
9211}
9212#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
9213#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"]
9214#[inline(always)]
9215#[target_feature(enable = "neon")]
9216#[cfg_attr(test, assert_instr(fcvtps))]
9217#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9218pub fn vcvtpd_s64_f64(a: f64) -> i64 {
9219    unsafe extern "unadjusted" {
9220        #[cfg_attr(
9221            any(target_arch = "aarch64", target_arch = "arm64ec"),
9222            link_name = "llvm.aarch64.neon.fcvtps.i64.f64"
9223        )]
9224        fn _vcvtpd_s64_f64(a: f64) -> i64;
9225    }
9226    unsafe { _vcvtpd_s64_f64(a) }
9227}
9228#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
9229#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"]
9230#[inline(always)]
9231#[target_feature(enable = "neon")]
9232#[cfg_attr(test, assert_instr(fcvtpu))]
9233#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9234pub fn vcvtps_u32_f32(a: f32) -> u32 {
9235    unsafe extern "unadjusted" {
9236        #[cfg_attr(
9237            any(target_arch = "aarch64", target_arch = "arm64ec"),
9238            link_name = "llvm.aarch64.neon.fcvtpu.i32.f32"
9239        )]
9240        fn _vcvtps_u32_f32(a: f32) -> u32;
9241    }
9242    unsafe { _vcvtps_u32_f32(a) }
9243}
9244#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
9245#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"]
9246#[inline(always)]
9247#[target_feature(enable = "neon")]
9248#[cfg_attr(test, assert_instr(fcvtpu))]
9249#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9250pub fn vcvtpd_u64_f64(a: f64) -> u64 {
9251    unsafe extern "unadjusted" {
9252        #[cfg_attr(
9253            any(target_arch = "aarch64", target_arch = "arm64ec"),
9254            link_name = "llvm.aarch64.neon.fcvtpu.i64.f64"
9255        )]
9256        fn _vcvtpd_u64_f64(a: f64) -> u64;
9257    }
9258    unsafe { _vcvtpd_u64_f64(a) }
9259}
9260#[doc = "Fixed-point convert to floating-point"]
9261#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"]
9262#[inline(always)]
9263#[target_feature(enable = "neon")]
9264#[cfg_attr(test, assert_instr(ucvtf))]
9265#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9266pub fn vcvts_f32_u32(a: u32) -> f32 {
9267    a as f32
9268}
9269#[doc = "Fixed-point convert to floating-point"]
9270#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"]
9271#[inline(always)]
9272#[target_feature(enable = "neon")]
9273#[cfg_attr(test, assert_instr(ucvtf))]
9274#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9275pub fn vcvtd_f64_u64(a: u64) -> f64 {
9276    a as f64
9277}
9278#[doc = "Fixed-point convert to floating-point"]
9279#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"]
9280#[inline(always)]
9281#[target_feature(enable = "neon")]
9282#[cfg_attr(test, assert_instr(scvtf, N = 2))]
9283#[rustc_legacy_const_generics(1)]
9284#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9285pub fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
9286    static_assert!(N >= 1 && N <= 64);
9287    unsafe extern "unadjusted" {
9288        #[cfg_attr(
9289            any(target_arch = "aarch64", target_arch = "arm64ec"),
9290            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32"
9291        )]
9292        fn _vcvts_n_f32_s32(a: i32, n: i32) -> f32;
9293    }
9294    unsafe { _vcvts_n_f32_s32(a, N) }
9295}
9296#[doc = "Fixed-point convert to floating-point"]
9297#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"]
9298#[inline(always)]
9299#[target_feature(enable = "neon")]
9300#[cfg_attr(test, assert_instr(scvtf, N = 2))]
9301#[rustc_legacy_const_generics(1)]
9302#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9303pub fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
9304    static_assert!(N >= 1 && N <= 64);
9305    unsafe extern "unadjusted" {
9306        #[cfg_attr(
9307            any(target_arch = "aarch64", target_arch = "arm64ec"),
9308            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64"
9309        )]
9310        fn _vcvtd_n_f64_s64(a: i64, n: i32) -> f64;
9311    }
9312    unsafe { _vcvtd_n_f64_s64(a, N) }
9313}
9314#[doc = "Fixed-point convert to floating-point"]
9315#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"]
9316#[inline(always)]
9317#[target_feature(enable = "neon")]
9318#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
9319#[rustc_legacy_const_generics(1)]
9320#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9321pub fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
9322    static_assert!(N >= 1 && N <= 32);
9323    unsafe extern "unadjusted" {
9324        #[cfg_attr(
9325            any(target_arch = "aarch64", target_arch = "arm64ec"),
9326            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32"
9327        )]
9328        fn _vcvts_n_f32_u32(a: u32, n: i32) -> f32;
9329    }
9330    unsafe { _vcvts_n_f32_u32(a, N) }
9331}
9332#[doc = "Fixed-point convert to floating-point"]
9333#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"]
9334#[inline(always)]
9335#[target_feature(enable = "neon")]
9336#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
9337#[rustc_legacy_const_generics(1)]
9338#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9339pub fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
9340    static_assert!(N >= 1 && N <= 64);
9341    unsafe extern "unadjusted" {
9342        #[cfg_attr(
9343            any(target_arch = "aarch64", target_arch = "arm64ec"),
9344            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64"
9345        )]
9346        fn _vcvtd_n_f64_u64(a: u64, n: i32) -> f64;
9347    }
9348    unsafe { _vcvtd_n_f64_u64(a, N) }
9349}
9350#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9351#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"]
9352#[inline(always)]
9353#[target_feature(enable = "neon")]
9354#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
9355#[rustc_legacy_const_generics(1)]
9356#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9357pub fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
9358    static_assert!(N >= 1 && N <= 32);
9359    unsafe extern "unadjusted" {
9360        #[cfg_attr(
9361            any(target_arch = "aarch64", target_arch = "arm64ec"),
9362            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32"
9363        )]
9364        fn _vcvts_n_s32_f32(a: f32, n: i32) -> i32;
9365    }
9366    unsafe { _vcvts_n_s32_f32(a, N) }
9367}
9368#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9369#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"]
9370#[inline(always)]
9371#[target_feature(enable = "neon")]
9372#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
9373#[rustc_legacy_const_generics(1)]
9374#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9375pub fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
9376    static_assert!(N >= 1 && N <= 64);
9377    unsafe extern "unadjusted" {
9378        #[cfg_attr(
9379            any(target_arch = "aarch64", target_arch = "arm64ec"),
9380            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64"
9381        )]
9382        fn _vcvtd_n_s64_f64(a: f64, n: i32) -> i64;
9383    }
9384    unsafe { _vcvtd_n_s64_f64(a, N) }
9385}
9386#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9387#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"]
9388#[inline(always)]
9389#[target_feature(enable = "neon")]
9390#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
9391#[rustc_legacy_const_generics(1)]
9392#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9393pub fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
9394    static_assert!(N >= 1 && N <= 32);
9395    unsafe extern "unadjusted" {
9396        #[cfg_attr(
9397            any(target_arch = "aarch64", target_arch = "arm64ec"),
9398            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32"
9399        )]
9400        fn _vcvts_n_u32_f32(a: f32, n: i32) -> u32;
9401    }
9402    unsafe { _vcvts_n_u32_f32(a, N) }
9403}
9404#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
9405#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"]
9406#[inline(always)]
9407#[target_feature(enable = "neon")]
9408#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
9409#[rustc_legacy_const_generics(1)]
9410#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9411pub fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
9412    static_assert!(N >= 1 && N <= 64);
9413    unsafe extern "unadjusted" {
9414        #[cfg_attr(
9415            any(target_arch = "aarch64", target_arch = "arm64ec"),
9416            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64"
9417        )]
9418        fn _vcvtd_n_u64_f64(a: f64, n: i32) -> u64;
9419    }
9420    unsafe { _vcvtd_n_u64_f64(a, N) }
9421}
9422#[doc = "Fixed-point convert to floating-point"]
9423#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"]
9424#[inline(always)]
9425#[target_feature(enable = "neon")]
9426#[cfg_attr(test, assert_instr(fcvtzs))]
9427#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9428pub fn vcvts_s32_f32(a: f32) -> i32 {
9429    a as i32
9430}
9431#[doc = "Fixed-point convert to floating-point"]
9432#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"]
9433#[inline(always)]
9434#[target_feature(enable = "neon")]
9435#[cfg_attr(test, assert_instr(fcvtzs))]
9436#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9437pub fn vcvtd_s64_f64(a: f64) -> i64 {
9438    a as i64
9439}
9440#[doc = "Fixed-point convert to floating-point"]
9441#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"]
9442#[inline(always)]
9443#[target_feature(enable = "neon")]
9444#[cfg_attr(test, assert_instr(fcvtzu))]
9445#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9446pub fn vcvts_u32_f32(a: f32) -> u32 {
9447    a as u32
9448}
9449#[doc = "Fixed-point convert to floating-point"]
9450#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"]
9451#[inline(always)]
9452#[target_feature(enable = "neon")]
9453#[cfg_attr(test, assert_instr(fcvtzu))]
9454#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9455pub fn vcvtd_u64_f64(a: f64) -> u64 {
9456    a as u64
9457}
9458#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
9459#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"]
9460#[inline(always)]
9461#[target_feature(enable = "neon")]
9462#[cfg_attr(test, assert_instr(fcvtxn))]
9463#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9464pub fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t {
9465    unsafe extern "unadjusted" {
9466        #[cfg_attr(
9467            any(target_arch = "aarch64", target_arch = "arm64ec"),
9468            link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64"
9469        )]
9470        fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t;
9471    }
9472    unsafe { _vcvtx_f32_f64(a) }
9473}
9474#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
9475#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"]
9476#[inline(always)]
9477#[target_feature(enable = "neon")]
9478#[cfg_attr(test, assert_instr(fcvtxn2))]
9479#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9480pub fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
9481    unsafe { simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) }
9482}
9483#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
9484#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"]
9485#[inline(always)]
9486#[target_feature(enable = "neon")]
9487#[cfg_attr(test, assert_instr(fcvtxn))]
9488#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9489pub fn vcvtxd_f32_f64(a: f64) -> f32 {
9490    unsafe { simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) }
9491}
9492#[doc = "Divide"]
9493#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f16)"]
9494#[inline(always)]
9495#[target_feature(enable = "neon,fp16")]
9496#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
9497#[cfg(not(target_arch = "arm64ec"))]
9498#[cfg_attr(test, assert_instr(fdiv))]
9499pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
9500    unsafe { simd_div(a, b) }
9501}
9502#[doc = "Divide"]
9503#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f16)"]
9504#[inline(always)]
9505#[target_feature(enable = "neon,fp16")]
9506#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
9507#[cfg(not(target_arch = "arm64ec"))]
9508#[cfg_attr(test, assert_instr(fdiv))]
9509pub fn vdivq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
9510    unsafe { simd_div(a, b) }
9511}
9512#[doc = "Divide"]
9513#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"]
9514#[inline(always)]
9515#[target_feature(enable = "neon")]
9516#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9517#[cfg_attr(test, assert_instr(fdiv))]
9518pub fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
9519    unsafe { simd_div(a, b) }
9520}
9521#[doc = "Divide"]
9522#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"]
9523#[inline(always)]
9524#[target_feature(enable = "neon")]
9525#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9526#[cfg_attr(test, assert_instr(fdiv))]
9527pub fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
9528    unsafe { simd_div(a, b) }
9529}
9530#[doc = "Divide"]
9531#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"]
9532#[inline(always)]
9533#[target_feature(enable = "neon")]
9534#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9535#[cfg_attr(test, assert_instr(fdiv))]
9536pub fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
9537    unsafe { simd_div(a, b) }
9538}
9539#[doc = "Divide"]
9540#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"]
9541#[inline(always)]
9542#[target_feature(enable = "neon")]
9543#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9544#[cfg_attr(test, assert_instr(fdiv))]
9545pub fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
9546    unsafe { simd_div(a, b) }
9547}
9548#[doc = "Divide"]
9549#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivh_f16)"]
9550#[inline(always)]
9551#[target_feature(enable = "neon,fp16")]
9552#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9553#[cfg(not(target_arch = "arm64ec"))]
9554#[cfg_attr(test, assert_instr(fdiv))]
9555pub fn vdivh_f16(a: f16, b: f16) -> f16 {
9556    a / b
9557}
9558#[doc = "Set all vector lanes to the same value"]
9559#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"]
9560#[inline(always)]
9561#[target_feature(enable = "neon")]
9562#[cfg_attr(test, assert_instr(nop, N = 0))]
9563#[rustc_legacy_const_generics(1)]
9564#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9565pub fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
9566    static_assert!(N == 0);
9567    a
9568}
9569#[doc = "Set all vector lanes to the same value"]
9570#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"]
9571#[inline(always)]
9572#[target_feature(enable = "neon")]
9573#[cfg_attr(test, assert_instr(nop, N = 0))]
9574#[rustc_legacy_const_generics(1)]
9575#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9576pub fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
9577    static_assert!(N == 0);
9578    a
9579}
9580#[doc = "Set all vector lanes to the same value"]
9581#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"]
9582#[inline(always)]
9583#[target_feature(enable = "neon")]
9584#[cfg_attr(test, assert_instr(nop, N = 1))]
9585#[rustc_legacy_const_generics(1)]
9586#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9587pub fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
9588    static_assert_uimm_bits!(N, 1);
9589    unsafe { transmute::<f64, _>(simd_extract!(a, N as u32)) }
9590}
9591#[doc = "Set all vector lanes to the same value"]
9592#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"]
9593#[inline(always)]
9594#[target_feature(enable = "neon")]
9595#[cfg_attr(test, assert_instr(nop, N = 1))]
9596#[rustc_legacy_const_generics(1)]
9597#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9598pub fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
9599    static_assert_uimm_bits!(N, 1);
9600    unsafe { transmute::<u64, _>(simd_extract!(a, N as u32)) }
9601}
9602#[doc = "Set all vector lanes to the same value"]
9603#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"]
9604#[inline(always)]
9605#[target_feature(enable = "neon")]
9606#[cfg_attr(test, assert_instr(nop, N = 4))]
9607#[rustc_legacy_const_generics(1)]
9608#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9609pub fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
9610    static_assert_uimm_bits!(N, 3);
9611    unsafe { simd_extract!(a, N as u32) }
9612}
9613#[doc = "Set all vector lanes to the same value"]
9614#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"]
9615#[inline(always)]
9616#[target_feature(enable = "neon")]
9617#[cfg_attr(test, assert_instr(nop, N = 4))]
9618#[rustc_legacy_const_generics(1)]
9619#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9620pub fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
9621    static_assert_uimm_bits!(N, 3);
9622    unsafe { simd_extract!(a, N as u32) }
9623}
9624#[doc = "Set all vector lanes to the same value"]
9625#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"]
9626#[inline(always)]
9627#[target_feature(enable = "neon")]
9628#[cfg_attr(test, assert_instr(nop, N = 4))]
9629#[rustc_legacy_const_generics(1)]
9630#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9631pub fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
9632    static_assert_uimm_bits!(N, 3);
9633    unsafe { simd_extract!(a, N as u32) }
9634}
9635#[doc = "Set all vector lanes to the same value"]
9636#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"]
9637#[inline(always)]
9638#[target_feature(enable = "neon")]
9639#[cfg_attr(test, assert_instr(nop, N = 4))]
9640#[rustc_legacy_const_generics(1)]
9641#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9642pub fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
9643    static_assert_uimm_bits!(N, 3);
9644    unsafe { simd_extract!(a, N as u32) }
9645}
9646#[doc = "Set all vector lanes to the same value"]
9647#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"]
9648#[inline(always)]
9649#[target_feature(enable = "neon")]
9650#[cfg_attr(test, assert_instr(nop, N = 4))]
9651#[rustc_legacy_const_generics(1)]
9652#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9653pub fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
9654    static_assert_uimm_bits!(N, 3);
9655    unsafe { simd_extract!(a, N as u32) }
9656}
9657#[doc = "Set all vector lanes to the same value"]
9658#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"]
9659#[inline(always)]
9660#[target_feature(enable = "neon")]
9661#[cfg_attr(test, assert_instr(nop, N = 4))]
9662#[rustc_legacy_const_generics(1)]
9663#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9664pub fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
9665    static_assert_uimm_bits!(N, 3);
9666    unsafe { simd_extract!(a, N as u32) }
9667}
9668#[doc = "Extract an element from a vector"]
9669#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"]
9670#[inline(always)]
9671#[target_feature(enable = "neon")]
9672#[cfg_attr(test, assert_instr(nop, N = 8))]
9673#[rustc_legacy_const_generics(1)]
9674#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9675pub fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
9676    static_assert_uimm_bits!(N, 4);
9677    unsafe { simd_extract!(a, N as u32) }
9678}
9679#[doc = "Extract an element from a vector"]
9680#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"]
9681#[inline(always)]
9682#[target_feature(enable = "neon")]
9683#[cfg_attr(test, assert_instr(nop, N = 8))]
9684#[rustc_legacy_const_generics(1)]
9685#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9686pub fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
9687    static_assert_uimm_bits!(N, 4);
9688    unsafe { simd_extract!(a, N as u32) }
9689}
9690#[doc = "Extract an element from a vector"]
9691#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"]
9692#[inline(always)]
9693#[target_feature(enable = "neon")]
9694#[cfg_attr(test, assert_instr(nop, N = 8))]
9695#[rustc_legacy_const_generics(1)]
9696#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9697pub fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
9698    static_assert_uimm_bits!(N, 4);
9699    unsafe { simd_extract!(a, N as u32) }
9700}
9701#[doc = "Set all vector lanes to the same value"]
9702#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"]
9703#[inline(always)]
9704#[target_feature(enable = "neon")]
9705#[cfg_attr(test, assert_instr(nop, N = 0))]
9706#[rustc_legacy_const_generics(1)]
9707#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9708pub fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
9709    static_assert!(N == 0);
9710    unsafe { simd_extract!(a, N as u32) }
9711}
9712#[doc = "Set all vector lanes to the same value"]
9713#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"]
9714#[inline(always)]
9715#[target_feature(enable = "neon")]
9716#[cfg_attr(test, assert_instr(nop, N = 0))]
9717#[rustc_legacy_const_generics(1)]
9718#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9719pub fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
9720    static_assert!(N == 0);
9721    unsafe { simd_extract!(a, N as u32) }
9722}
9723#[doc = "Set all vector lanes to the same value"]
9724#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"]
9725#[inline(always)]
9726#[target_feature(enable = "neon")]
9727#[cfg_attr(test, assert_instr(nop, N = 0))]
9728#[rustc_legacy_const_generics(1)]
9729#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9730pub fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
9731    static_assert!(N == 0);
9732    unsafe { simd_extract!(a, N as u32) }
9733}
9734#[doc = "Set all vector lanes to the same value"]
9735#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_f16)"]
9736#[inline(always)]
9737#[cfg_attr(test, assert_instr(nop, N = 2))]
9738#[rustc_legacy_const_generics(1)]
9739#[target_feature(enable = "neon,fp16")]
9740#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9741#[cfg(not(target_arch = "arm64ec"))]
9742pub fn vduph_lane_f16<const N: i32>(a: float16x4_t) -> f16 {
9743    static_assert_uimm_bits!(N, 2);
9744    unsafe { simd_extract!(a, N as u32) }
9745}
9746#[doc = "Extract an element from a vector"]
9747#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_f16)"]
9748#[inline(always)]
9749#[cfg_attr(test, assert_instr(nop, N = 4))]
9750#[rustc_legacy_const_generics(1)]
9751#[target_feature(enable = "neon,fp16")]
9752#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9753#[cfg(not(target_arch = "arm64ec"))]
9754pub fn vduph_laneq_f16<const N: i32>(a: float16x8_t) -> f16 {
9755    static_assert_uimm_bits!(N, 4);
9756    unsafe { simd_extract!(a, N as u32) }
9757}
9758#[doc = "Set all vector lanes to the same value"]
9759#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"]
9760#[inline(always)]
9761#[target_feature(enable = "neon")]
9762#[cfg_attr(test, assert_instr(dup, N = 0))]
9763#[rustc_legacy_const_generics(1)]
9764#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9765pub fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
9766    static_assert!(N == 0);
9767    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
9768}
9769#[doc = "Set all vector lanes to the same value"]
9770#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"]
9771#[inline(always)]
9772#[target_feature(enable = "neon")]
9773#[cfg_attr(test, assert_instr(dup, N = 0))]
9774#[rustc_legacy_const_generics(1)]
9775#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9776pub fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
9777    static_assert!(N == 0);
9778    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
9779}
9780#[doc = "Set all vector lanes to the same value"]
9781#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"]
9782#[inline(always)]
9783#[target_feature(enable = "neon")]
9784#[cfg_attr(test, assert_instr(dup, N = 1))]
9785#[rustc_legacy_const_generics(1)]
9786#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9787pub fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
9788    static_assert_uimm_bits!(N, 1);
9789    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
9790}
9791#[doc = "Set all vector lanes to the same value"]
9792#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"]
9793#[inline(always)]
9794#[target_feature(enable = "neon")]
9795#[cfg_attr(test, assert_instr(dup, N = 1))]
9796#[rustc_legacy_const_generics(1)]
9797#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9798pub fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
9799    static_assert_uimm_bits!(N, 1);
9800    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
9801}
9802#[doc = "Set all vector lanes to the same value"]
9803#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"]
9804#[inline(always)]
9805#[target_feature(enable = "neon")]
9806#[cfg_attr(test, assert_instr(nop, N = 1))]
9807#[rustc_legacy_const_generics(1)]
9808#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9809pub fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
9810    static_assert_uimm_bits!(N, 1);
9811    unsafe { simd_extract!(a, N as u32) }
9812}
9813#[doc = "Set all vector lanes to the same value"]
9814#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"]
9815#[inline(always)]
9816#[target_feature(enable = "neon")]
9817#[cfg_attr(test, assert_instr(nop, N = 1))]
9818#[rustc_legacy_const_generics(1)]
9819#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9820pub fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
9821    static_assert_uimm_bits!(N, 1);
9822    unsafe { simd_extract!(a, N as u32) }
9823}
9824#[doc = "Set all vector lanes to the same value"]
9825#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"]
9826#[inline(always)]
9827#[target_feature(enable = "neon")]
9828#[cfg_attr(test, assert_instr(nop, N = 1))]
9829#[rustc_legacy_const_generics(1)]
9830#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9831pub fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
9832    static_assert_uimm_bits!(N, 1);
9833    unsafe { simd_extract!(a, N as u32) }
9834}
9835#[doc = "Set all vector lanes to the same value"]
9836#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"]
9837#[inline(always)]
9838#[target_feature(enable = "neon")]
9839#[cfg_attr(test, assert_instr(nop, N = 1))]
9840#[rustc_legacy_const_generics(1)]
9841#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9842pub fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
9843    static_assert_uimm_bits!(N, 1);
9844    unsafe { simd_extract!(a, N as u32) }
9845}
9846#[doc = "Set all vector lanes to the same value"]
9847#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"]
9848#[inline(always)]
9849#[target_feature(enable = "neon")]
9850#[cfg_attr(test, assert_instr(nop, N = 1))]
9851#[rustc_legacy_const_generics(1)]
9852#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9853pub fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
9854    static_assert_uimm_bits!(N, 1);
9855    unsafe { simd_extract!(a, N as u32) }
9856}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar form: returns lane N of the 128-bit vector as a u64 (unsigned twin of vdupd_laneq_s64).
pub fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
    // Compile-time bound: N in 0..=1 for the 2-lane vector.
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar form: returns lane N of the 128-bit float vector as an f32.
pub fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
    // Compile-time bound: N must fit in 2 bits, i.e. lane 0..=3 of the 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar ("h"-suffixed, halfword) form: returns lane N of `a` as an i16.
pub fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
    // Compile-time bound: N in 0..=3 for the 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar form: returns lane N of the 128-bit vector as an i32.
pub fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
    // Compile-time bound: N in 0..=3 for the 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar halfword form: returns lane N of `a` as a u16 (unsigned twin of vduph_lane_s16).
pub fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
    // Compile-time bound: N in 0..=3 for the 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar form: returns lane N of the 128-bit vector as a u32.
pub fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
    // Compile-time bound: N in 0..=3 for the 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar halfword form for polynomial elements: returns lane N of `a` as a p16.
pub fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
    // Compile-time bound: N in 0..=3 for the 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
// Computes `a ^ b ^ c` in a single SHA3 EOR3 instruction by binding the
// corresponding LLVM intrinsic; requires the `sha3` target feature.
pub fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v16i8"
        )]
        fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    unsafe { _veor3q_s8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
// `a ^ b ^ c` via the SHA3 EOR3 instruction (i16x8 element view; XOR is
// element-size agnostic, the type only shapes the API).
pub fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v8i16"
        )]
        fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _veor3q_s16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
// `a ^ b ^ c` via the SHA3 EOR3 instruction (i32x4 element view).
pub fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v4i32"
        )]
        fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _veor3q_s32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
// `a ^ b ^ c` via the SHA3 EOR3 instruction (i64x2 element view).
pub fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v2i64"
        )]
        fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    unsafe { _veor3q_s64(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
// `a ^ b ^ c` via the SHA3 EOR3 instruction; unsigned ("eor3u") LLVM binding, u8x16 view.
pub fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v16i8"
        )]
        fn _veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _veor3q_u8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
// `a ^ b ^ c` via the SHA3 EOR3 instruction; unsigned LLVM binding, u16x8 view.
pub fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v8i16"
        )]
        fn _veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _veor3q_u16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
// `a ^ b ^ c` via the SHA3 EOR3 instruction; unsigned LLVM binding, u32x4 view.
pub fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v4i32"
        )]
        fn _veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _veor3q_u32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
// `a ^ b ^ c` via the SHA3 EOR3 instruction; unsigned LLVM binding, u64x2 view.
pub fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v2i64"
        )]
        fn _veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _veor3q_u64(a, b, c) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Returns two consecutive lanes starting at lane N from the logical
// concatenation [a0, a1, b0, b1]: N=0 -> a unchanged, N=1 -> [a1, b0].
pub fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Compile-time bound: N in 0..=1.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // The mask `N & 0b1` is already guaranteed by the assert; it lets the
        // match be exhaustive without a reachable fallback arm.
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Polynomial-element twin of vextq_f64: picks two consecutive lanes starting at
// lane N from the concatenation [a0, a1, b0, b1].
pub fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Compile-time bound: N in 0..=1.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
// Single-lane fused `b * c + a` (argument order follows simd_fma: product
// operands first, accumulator last); no intermediate rounding.
pub fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// Broadcasts lane LANE of `c` across all lanes, then does a fused
// multiply-add: `a + b * c[LANE]` per lane.
pub fn vfma_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // Compile-time bound: LANE in 0..=3 for the 4-lane `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// Same as vfma_lane_f16 but `c` is a 128-bit (8-lane) vector ("laneq").
pub fn vfma_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // Compile-time bound: LANE in 0..=7 for the 8-lane `c`.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// 128-bit ("q") variant: broadcasts lane LANE of the 4-lane `c`, then
// computes `a + b * c[LANE]` across all 8 lanes.
pub fn vfmaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // Compile-time bound: LANE in 0..=3 for the 4-lane `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// 128-bit variant with an 8-lane `c`: `a + b * c[LANE]` across all lanes.
pub fn vfmaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // Compile-time bound: LANE in 0..=7 for the 8-lane `c`.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Broadcasts lane LANE of `c`, then computes `a + b * c[LANE]` per lane.
pub fn vfma_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // Compile-time bound: LANE in 0..=1 for the 2-lane `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Same as vfma_lane_f32 but `c` is a 128-bit (4-lane) vector ("laneq").
pub fn vfma_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // Compile-time bound: LANE in 0..=3 for the 4-lane `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 128-bit ("q") variant with a 2-lane `c`: `a + b * c[LANE]` per lane.
pub fn vfmaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // Compile-time bound: LANE in 0..=1 for the 2-lane `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 128-bit variant with a 4-lane `c`: `a + b * c[LANE]` per lane.
pub fn vfmaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // Compile-time bound: LANE in 0..=3 for the 4-lane `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 128-bit f64 variant: broadcasts lane LANE of `c`, then `a + b * c[LANE]`.
pub fn vfmaq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    // Compile-time bound: LANE in 0..=1 for the 2-lane `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 64-bit f64 vectors have a single lane, so LANE can only be 0 and the
// operation reduces to a scalar fused multiply-add (`fmadd`).
pub fn vfma_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    static_assert!(LANE == 0);
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Single-lane result with a 2-lane `c` ("laneq"): `a + b * c[LANE]`.
pub fn vfma_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    // Compile-time bound: LANE in 0..=1 for the 2-lane `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmla))]
// NOTE(review): the generated doc title previously read "Multiply-Subtract",
// but the body is a fused multiply-ADD (vfma_f16 / FMLA); corrected here —
// the real fix belongs in crates/stdarch-gen-arm/spec.
// Broadcasts the scalar `c` and computes `a + b * c` per lane.
pub fn vfma_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    vfma_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmla))]
// NOTE(review): the generated doc title previously read "Multiply-Subtract",
// but the body is a fused multiply-ADD (vfmaq_f16 / FMLA); corrected here —
// the real fix belongs in crates/stdarch-gen-arm/spec.
// Broadcasts the scalar `c` and computes `a + b * c` across all 8 lanes.
pub fn vfmaq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    vfmaq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
// Broadcasts the scalar `c` into the single lane and computes `a + b * c`.
pub fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    vfma_f64(a, b, vdup_n_f64(c))
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar ("d"-suffixed) form: extracts lane LANE of `c` (only lane 0 exists)
// and computes the fused scalar `b * c + a` via the LLVM fma intrinsic.
pub fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    static_assert!(LANE == 0);
    unsafe {
        let c: f64 = simd_extract!(c, LANE as u32);
        // fmaf64 argument order: product operands first, accumulator last.
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// Scalar half-precision fused `b * c + a` (fmaf16: product operands first,
// accumulator last).
pub fn vfmah_f16(a: f16, b: f16, c: f16) -> f16 {
    fmaf16(b, c, a)
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// Scalar form with a 4-lane multiplier vector: `a + b * v[LANE]`.
pub fn vfmah_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    // Compile-time bound: LANE in 0..=3 for the 4-lane `v`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
// Scalar form with an 8-lane multiplier vector ("laneq"): `a + b * v[LANE]`.
pub fn vfmah_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    // Compile-time bound: LANE in 0..=7 for the 8-lane `v`.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
// Per-lane fused `b * c + a` on the 2-lane f64 vectors (simd_fma takes the
// product operands first and the accumulator last).
pub fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// `c` is a single-lane vector, so LANE must be 0; its value is broadcast and
// `a + b * c[0]` is computed across both lanes.
pub fn vfmaq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    static_assert!(LANE == 0);
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
// Broadcasts the scalar `c` and computes `a + b * c` across both lanes.
pub fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    vfmaq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar ("s"-suffixed) form: extracts lane LANE of `c` and computes the
// fused scalar `b * c[LANE] + a`.
pub fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    // Compile-time bound: LANE in 0..=1 for the 2-lane `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: f32 = simd_extract!(c, LANE as u32);
        // fmaf32 argument order: product operands first, accumulator last.
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar form with a 4-lane `c` ("laneq"): fused `b * c[LANE] + a`.
pub fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    // Compile-time bound: LANE in 0..=3 for the 4-lane `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        let c: f32 = simd_extract!(c, LANE as u32);
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Scalar form with a 2-lane `c` ("laneq"): fused `b * c[LANE] + a`.
pub fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    // Compile-time bound: LANE in 0..=1 for the 2-lane `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        let c: f64 = simd_extract!(c, LANE as u32);
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal2))]
// Widening (f16 -> f32) multiply-add accumulated into `r`; the "_high" form
// maps to the FMLAL2 instruction, which uses the upper halves of `a` and `b`.
pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v2f32.v4f16"
        )]
        fn _vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlal_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal2))]
// 128-bit ("q") twin of vfmlal_high_f16: widening multiply-add into the
// 4-lane f32 accumulator via FMLAL2 (upper halves of `a` and `b`).
pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v4f32.v8f16"
        )]
        fn _vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlalq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// By-element form: broadcasts lane LANE of `b`, then does the widening
// high-half multiply-add of vfmlal_high_f16.
pub fn vfmlal_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // Compile-time bound: LANE in 0..=3 for the 4-lane `b`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// Same as vfmlal_lane_high_f16 but `b` is an 8-lane vector ("laneq").
pub fn vfmlal_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // Compile-time bound: LANE in 0..=7 for the 8-lane `b`.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// 128-bit ("q") by-element form with a 4-lane `b`: broadcasts b[LANE] and
// delegates to vfmlalq_high_f16 (FMLAL2).
pub fn vfmlalq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // Compile-time bound: LANE in 0..=3 for the 4-lane `b`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// 128-bit by-element form with an 8-lane `b`: broadcasts b[LANE] and
// delegates to vfmlalq_high_f16 (FMLAL2).
pub fn vfmlalq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // Compile-time bound: LANE in 0..=7 for the 8-lane `b`.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Declaration of the backing LLVM intrinsic for this operation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v2f32.v4f16"
        )]
        fn _vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: calling the intrinsic is sound because this function can only be
    // called when the `neon`, `fp16` (and `fhm`) target features are enabled.
    unsafe { _vfmlal_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Declaration of the backing LLVM intrinsic for this operation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v4f32.v8f16"
        )]
        fn _vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: calling the intrinsic is sound because this function can only be
    // called when the `neon`, `fp16` (and `fhm`) target features are enabled.
    unsafe { _vfmlalq_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Declaration of the backing LLVM intrinsic for this operation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v2f32.v4f16"
        )]
        fn _vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: calling the intrinsic is sound because this function can only be
    // called when the `neon`, `fp16` (and `fhm`) target features are enabled.
    unsafe { _vfmlsl_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Declaration of the backing LLVM intrinsic for this operation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v4f32.v8f16"
        )]
        fn _vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: calling the intrinsic is sound because this function can only be
    // called when the `neon`, `fp16` (and `fhm`) target features are enabled.
    unsafe { _vfmlslq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Declaration of the backing LLVM intrinsic for this operation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v2f32.v4f16"
        )]
        fn _vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: calling the intrinsic is sound because this function can only be
    // called when the `neon`, `fp16` (and `fhm`) target features are enabled.
    unsafe { _vfmlsl_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Declaration of the backing LLVM intrinsic for this operation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v4f32.v8f16"
        )]
        fn _vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: calling the intrinsic is sound because this function can only be
    // called when the `neon`, `fp16` (and `fhm`) target features are enabled.
    unsafe { _vfmlslq_low_f16(r, a, b) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe {
        // fms(a, b, c) == fma(a, -b, c): negate the multiplicand and reuse the
        // fused multiply-add; the negation folds into FMSUB (see assert_instr).
        let b: float64x1_t = simd_neg(b);
        vfma_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfms_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfms_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `c` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` has 2 single-precision lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // `c` has 4 single-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // `c` has 2 single-precision lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` has 4 single-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    // `c` has 2 double-precision lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    // `c` is a single-lane vector, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    // `c` has 2 double-precision lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfms_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    // Broadcast the scalar `c` to every lane, then defer to the vector form.
    vfms_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfmsq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    // Broadcast the scalar `c` to every lane, then defer to the vector form.
    vfmsq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    // Broadcast the scalar `c` to the single lane, then defer to the vector form.
    vfms_f64(a, b, vdup_n_f64(c))
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmsub))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_f16(a: f16, b: f16, c: f16) -> f16 {
    // fms(a, b, c) == fma(a, -b, c): negate the multiplicand and reuse the
    // fused multiply-add; the negation folds into FMSUB (see assert_instr).
    vfmah_f16(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    // `v` has 4 half-precision lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds.
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    // `v` has 8 half-precision lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds.
    unsafe {
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe {
        // fms(a, b, c) == fma(a, -b, c): negate the multiplicand and reuse the
        // fused multiply-add; the negation folds into FMLS (see assert_instr).
        let b: float64x2_t = simd_neg(b);
        vfmaq_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    // `c` is a single-lane vector, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    // SAFETY: LANE is compile-time checked above, so `simd_extract!` is in bounds;
    // the selected lane is broadcast and the vector form does the work.
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    // Broadcast the scalar `c` to every lane, then defer to the vector form.
    vfmsq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    // fms == fma with the multiplicand negated; LANE validation happens
    // inside vfmas_lane_f32, and the negation folds into FMSUB.
    vfmas_lane_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    // fms == fma with the multiplicand negated; LANE validation happens
    // inside vfmas_laneq_f32, and the negation folds into FMSUB.
    vfmas_laneq_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    // fms == fma with the multiplicand negated; LANE validation happens
    // inside vfmad_lane_f64, and the negation folds into FMSUB.
    vfmad_lane_f64::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    // fms == fma with the multiplicand negated; LANE validation happens
    // inside vfmad_laneq_f64, and the negation folds into FMSUB.
    vfmad_laneq_f64::<LANE>(a, -b, c)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t {
    // Unaligned read: the caller must guarantee `ptr` is valid for reading
    // `size_of::<float16x4_t>()` bytes; no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required. (poly64 requires `aes`.)
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t {
    // Whole-vector load via `ptr::read_unaligned`: `ptr` must be valid for reads
    // of the vector's size, but no alignment is required. (poly64 requires `aes`.)
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1_f64_x2(ptr: *const f64) -> float64x1x2_t {
    // Single unaligned read of the whole multi-vector struct (2 consecutive
    // vectors); `ptr` must be valid for reads of that full size.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1_f64_x3(ptr: *const f64) -> float64x1x3_t {
    // Single unaligned read of the whole multi-vector struct (3 consecutive
    // vectors); `ptr` must be valid for reads of that full size.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1_f64_x4(ptr: *const f64) -> float64x1x4_t {
    // Single unaligned read of the whole multi-vector struct (4 consecutive
    // vectors); `ptr` must be valid for reads of that full size.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1q_f64_x2(ptr: *const f64) -> float64x2x2_t {
    // Single unaligned read of the whole multi-vector struct (2 consecutive
    // q-vectors); `ptr` must be valid for reads of that full size.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1q_f64_x3(ptr: *const f64) -> float64x2x3_t {
    // Single unaligned read of the whole multi-vector struct (3 consecutive
    // q-vectors); `ptr` must be valid for reads of that full size.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld))]
pub unsafe fn vld1q_f64_x4(ptr: *const f64) -> float64x2x4_t {
    // Single unaligned read of the whole multi-vector struct (4 consecutive
    // q-vectors); `ptr` must be valid for reads of that full size.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t {
    // Direct binding to the LLVM LD2R intrinsic; the link_name suffix
    // (v1f64.p0) encodes the result vector shape and pointer type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v1f64.p0"
        )]
        fn _vld2_dup_f64(ptr: *const f64) -> float64x1x2_t;
    }
    _vld2_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t {
    // Direct binding to the LLVM LD2R intrinsic; the link_name suffix
    // (v2f64.p0) encodes the result vector shape and pointer type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2f64.p0"
        )]
        fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t;
    }
    _vld2q_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t {
    // Direct binding to the LLVM LD2R intrinsic; the link_name suffix
    // (v2i64.p0) encodes the result vector shape and pointer type.
    // Also used (via transmute) by the u64/p64 variants below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2i64.p0"
        )]
        fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t;
    }
    _vld2q_dup_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t {
    // With one f64 per d-register there is no de-interleaving to do, so this
    // is just an unaligned read of two consecutive f64s (assert_instr: nop).
    crate::ptr::read_unaligned(a.cast())
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> float64x1x2_t {
    // float64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // LLVM LD2LANE intrinsic: loads one 2-element structure into lane `n`
    // of the two vectors given in `b`, leaving the other lanes unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0"
        )]
        fn _vld2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t;
    }
    _vld2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> int64x1x2_t {
    // int64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // LLVM LD2LANE intrinsic: loads one 2-element structure into lane `n`
    // of the two vectors given in `b`. Also reused by the u64/p64 wrappers.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0"
        )]
        fn _vld2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t;
    }
    _vld2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t {
    static_assert!(LANE == 0);
    // poly64 and i64 vectors share a layout, so delegate to the s64
    // implementation and transmute the argument/result types.
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t {
    static_assert!(LANE == 0);
    // u64 and i64 vectors share a layout, so delegate to the s64
    // implementation and transmute the argument/result types.
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    // Little-endian: the s64 result already has the right lane order;
    // just reinterpret it as poly64 vectors.
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    let mut ret_val: poly64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    // Big-endian only: swap the two lanes of each result register
    // (shuffle [1, 0]) to restore the expected lane order.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    // Little-endian: the s64 result already has the right lane order;
    // just reinterpret it as u64 vectors.
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    let mut ret_val: uint64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    // Big-endian only: swap the two lanes of each result register
    // (shuffle [1, 0]) to restore the expected lane order.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t {
    // Direct binding to the LLVM LD2 (de-interleaving) load intrinsic;
    // the link_name suffix (v2f64.p0) encodes the vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2f64.p0"
        )]
        fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t;
    }
    _vld2q_f64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t {
    // Direct binding to the LLVM LD2 (de-interleaving) load intrinsic;
    // the link_name suffix (v2i64.p0) encodes the vector shape.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2i64.p0"
        )]
        fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t;
    }
    _vld2q_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -> float64x2x2_t {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // LLVM LD2LANE intrinsic: loads one 2-element structure into lane `n`
    // of the two vectors given in `b`, leaving the other lanes unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0"
        )]
        fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8)
            -> float64x2x2_t;
    }
    _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> int8x16x2_t {
    // int8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // LLVM LD2LANE intrinsic: loads one 2-element structure into lane `n`
    // of the two vectors given in `b`, leaving the other lanes unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0"
        )]
        fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t;
    }
    _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) -> int64x2x2_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // LLVM LD2LANE intrinsic: loads one 2-element structure into lane `n`
    // of the two vectors given in `b`. Also reused by the p64 wrapper.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0"
        )]
        fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t;
    }
    _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
11832#[doc = "Load multiple 2-element structures to two registers"]
11833#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"]
11834#[doc = "## Safety"]
11835#[doc = "  * Neon intrinsic unsafe"]
11836#[inline(always)]
11837#[target_feature(enable = "neon,aes")]
11838#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11839#[rustc_legacy_const_generics(2)]
11840#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11841pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t {
11842    static_assert_uimm_bits!(LANE, 1);
11843    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
11844}
11845#[doc = "Load multiple 2-element structures to two registers"]
11846#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"]
11847#[doc = "## Safety"]
11848#[doc = "  * Neon intrinsic unsafe"]
11849#[inline(always)]
11850#[target_feature(enable = "neon")]
11851#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11852#[rustc_legacy_const_generics(2)]
11853#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11854pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t {
11855    static_assert_uimm_bits!(LANE, 4);
11856    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
11857}
11858#[doc = "Load multiple 2-element structures to two registers"]
11859#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"]
11860#[doc = "## Safety"]
11861#[doc = "  * Neon intrinsic unsafe"]
11862#[inline(always)]
11863#[target_feature(enable = "neon")]
11864#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11865#[rustc_legacy_const_generics(2)]
11866#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11867pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t {
11868    static_assert_uimm_bits!(LANE, 1);
11869    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
11870}
11871#[doc = "Load multiple 2-element structures to two registers"]
11872#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"]
11873#[doc = "## Safety"]
11874#[doc = "  * Neon intrinsic unsafe"]
11875#[inline(always)]
11876#[target_feature(enable = "neon")]
11877#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
11878#[rustc_legacy_const_generics(2)]
11879#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11880pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t {
11881    static_assert_uimm_bits!(LANE, 4);
11882    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
11883}
11884#[doc = "Load multiple 2-element structures to two registers"]
11885#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
11886#[doc = "## Safety"]
11887#[doc = "  * Neon intrinsic unsafe"]
11888#[inline(always)]
11889#[cfg(target_endian = "little")]
11890#[target_feature(enable = "neon,aes")]
11891#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11892#[cfg_attr(test, assert_instr(ld2))]
11893pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
11894    transmute(vld2q_s64(transmute(a)))
11895}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    // Big-endian: reverse the two lanes of each result vector so lane
    // numbering matches the little-endian semantics of the public intrinsic.
    let mut ret_val: poly64x2x2_t = transmute(vld2q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
11911#[doc = "Load multiple 2-element structures to two registers"]
11912#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
11913#[doc = "## Safety"]
11914#[doc = "  * Neon intrinsic unsafe"]
11915#[inline(always)]
11916#[target_feature(enable = "neon")]
11917#[stable(feature = "neon_intrinsics", since = "1.59.0")]
11918#[cfg_attr(test, assert_instr(ld2))]
11919pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
11920    transmute(vld2q_s64(transmute(a)))
11921}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t {
    // LD3R: load one 3-element structure and replicate each element across
    // all lanes of its destination register (a single lane here).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v1f64.p0"
        )]
        fn _vld3_dup_f64(ptr: *const f64) -> float64x1x3_t;
    }
    _vld3_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t {
    // LD3R: load one 3-element structure and broadcast each element to both
    // lanes of its destination vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2f64.p0"
        )]
        fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t;
    }
    _vld3q_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t {
    // LD3R: load one 3-element structure and broadcast each element to both
    // lanes of its destination vector. Also used by the u64/p64 wrappers.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2i64.p0"
        )]
        fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t;
    }
    _vld3q_dup_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t {
    // With single-lane (64-bit) vectors there is nothing to deinterleave, so
    // this is a plain unaligned read of three consecutive f64 values (nop).
    crate::ptr::read_unaligned(a.cast())
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) -> float64x1x3_t {
    // float64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0"
        )]
        fn _vld3_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x3_t;
    }
    // Pass existing register contents, the lane index, and the element pointer.
    _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
12013#[doc = "Load multiple 3-element structures to three registers"]
12014#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"]
12015#[doc = "## Safety"]
12016#[doc = "  * Neon intrinsic unsafe"]
12017#[inline(always)]
12018#[target_feature(enable = "neon,aes")]
12019#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12020#[rustc_legacy_const_generics(2)]
12021#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12022pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t {
12023    static_assert!(LANE == 0);
12024    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
12025}
#[doc = "Load multiple 3-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> int64x1x3_t {
    // int64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0"
        )]
        fn _vld3_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x3_t;
    }
    // Also the backing implementation for the u64/p64 lane-load wrappers.
    _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
12052#[doc = "Load multiple 3-element structures to three registers"]
12053#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"]
12054#[doc = "## Safety"]
12055#[doc = "  * Neon intrinsic unsafe"]
12056#[inline(always)]
12057#[target_feature(enable = "neon")]
12058#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12059#[rustc_legacy_const_generics(2)]
12060#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12061pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t {
12062    static_assert!(LANE == 0);
12063    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
12064}
12065#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
12066#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
12067#[doc = "## Safety"]
12068#[doc = "  * Neon intrinsic unsafe"]
12069#[inline(always)]
12070#[cfg(target_endian = "little")]
12071#[target_feature(enable = "neon,aes")]
12072#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12073#[cfg_attr(test, assert_instr(ld3r))]
12074pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
12075    transmute(vld3q_dup_s64(transmute(a)))
12076}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    // Big-endian: reverse the two lanes of each result vector so lane
    // numbering matches the little-endian semantics of the public intrinsic.
    let mut ret_val: poly64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
12093#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
12094#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
12095#[doc = "## Safety"]
12096#[doc = "  * Neon intrinsic unsafe"]
12097#[inline(always)]
12098#[cfg(target_endian = "little")]
12099#[target_feature(enable = "neon")]
12100#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12101#[cfg_attr(test, assert_instr(ld3r))]
12102pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
12103    transmute(vld3q_dup_s64(transmute(a)))
12104}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    // Big-endian: reverse the two lanes of each result vector so lane
    // numbering matches the little-endian semantics of the public intrinsic.
    let mut ret_val: uint64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t {
    // Deinterleaving load of 3 structures into 2-lane f64 vectors; the macro
    // (defined in crate::core_arch::macros) lowers to the LD3 pattern.
    crate::core_arch::macros::deinterleaving_load!(f64, 2, 3, a)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t {
    // Deinterleaving load of 3 structures into 2-lane i64 vectors; also the
    // backing implementation for the u64/p64 wrappers.
    crate::core_arch::macros::deinterleaving_load!(i64, 2, 3, a)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x3_t) -> float64x2x3_t {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0"
        )]
        fn _vld3q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x3_t;
    }
    // Pass existing register contents, the lane index, and the element pointer.
    _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
12169#[doc = "Load multiple 3-element structures to three registers"]
12170#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"]
12171#[doc = "## Safety"]
12172#[doc = "  * Neon intrinsic unsafe"]
12173#[inline(always)]
12174#[target_feature(enable = "neon,aes")]
12175#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12176#[rustc_legacy_const_generics(2)]
12177#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12178pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t {
12179    static_assert_uimm_bits!(LANE, 1);
12180    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
12181}
12182#[doc = "Load multiple 3-element structures to two registers"]
12183#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"]
12184#[doc = "## Safety"]
12185#[doc = "  * Neon intrinsic unsafe"]
12186#[inline(always)]
12187#[target_feature(enable = "neon")]
12188#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12189#[rustc_legacy_const_generics(2)]
12190#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12191pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> int8x16x3_t {
12192    static_assert_uimm_bits!(LANE, 3);
12193    unsafe extern "unadjusted" {
12194        #[cfg_attr(
12195            any(target_arch = "aarch64", target_arch = "arm64ec"),
12196            link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0"
12197        )]
12198        fn _vld3q_lane_s8(
12199            a: int8x16_t,
12200            b: int8x16_t,
12201            c: int8x16_t,
12202            n: i64,
12203            ptr: *const i8,
12204        ) -> int8x16x3_t;
12205    }
12206    _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
12207}
#[doc = "Load multiple 3-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> int64x2x3_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0"
        )]
        fn _vld3q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x3_t;
    }
    // Also the backing implementation for the u64/p64 lane-load wrappers.
    _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
12234#[doc = "Load multiple 3-element structures to three registers"]
12235#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"]
12236#[doc = "## Safety"]
12237#[doc = "  * Neon intrinsic unsafe"]
12238#[inline(always)]
12239#[target_feature(enable = "neon")]
12240#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12241#[rustc_legacy_const_generics(2)]
12242#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12243pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t {
12244    static_assert_uimm_bits!(LANE, 4);
12245    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
12246}
12247#[doc = "Load multiple 3-element structures to three registers"]
12248#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"]
12249#[doc = "## Safety"]
12250#[doc = "  * Neon intrinsic unsafe"]
12251#[inline(always)]
12252#[target_feature(enable = "neon")]
12253#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12254#[rustc_legacy_const_generics(2)]
12255#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12256pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t {
12257    static_assert_uimm_bits!(LANE, 1);
12258    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
12259}
12260#[doc = "Load multiple 3-element structures to three registers"]
12261#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"]
12262#[doc = "## Safety"]
12263#[doc = "  * Neon intrinsic unsafe"]
12264#[inline(always)]
12265#[target_feature(enable = "neon")]
12266#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12267#[rustc_legacy_const_generics(2)]
12268#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12269pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t {
12270    static_assert_uimm_bits!(LANE, 4);
12271    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
12272}
12273#[doc = "Load multiple 3-element structures to three registers"]
12274#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
12275#[doc = "## Safety"]
12276#[doc = "  * Neon intrinsic unsafe"]
12277#[inline(always)]
12278#[cfg(target_endian = "little")]
12279#[target_feature(enable = "neon,aes")]
12280#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12281#[cfg_attr(test, assert_instr(ld3))]
12282pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
12283    transmute(vld3q_s64(transmute(a)))
12284}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    // Big-endian: reverse the two lanes of each result vector so lane
    // numbering matches the little-endian semantics of the public intrinsic.
    let mut ret_val: poly64x2x3_t = transmute(vld3q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
12301#[doc = "Load multiple 3-element structures to three registers"]
12302#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
12303#[doc = "## Safety"]
12304#[doc = "  * Neon intrinsic unsafe"]
12305#[inline(always)]
12306#[target_feature(enable = "neon")]
12307#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12308#[cfg_attr(test, assert_instr(ld3))]
12309pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
12310    transmute(vld3q_s64(transmute(a)))
12311}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t {
    // LD4R: load one 4-element structure and replicate each element across
    // all lanes of its destination register (a single lane here).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v1f64.p0"
        )]
        fn _vld4_dup_f64(ptr: *const f64) -> float64x1x4_t;
    }
    _vld4_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t {
    // LD4R: load one 4-element structure and broadcast each element to both
    // lanes of its destination vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2f64.p0"
        )]
        fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t;
    }
    _vld4q_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t {
    // LD4R: load one 4-element structure and broadcast each element to both
    // lanes of its destination vector. Also used by the u64/p64 wrappers.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2i64.p0"
        )]
        fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t;
    }
    _vld4q_dup_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t {
    // With single-lane (64-bit) vectors there is nothing to deinterleave, so
    // this is a plain unaligned read of four consecutive f64 values (nop).
    crate::ptr::read_unaligned(a.cast())
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) -> float64x1x4_t {
    // float64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0"
        )]
        fn _vld4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x4_t;
    }
    // Pass existing register contents, the lane index, and the element pointer.
    _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> int64x1x4_t {
    // int64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0"
        )]
        fn _vld4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x4_t;
    }
    // Also the backing implementation for the u64/p64 lane-load wrappers.
    _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
12431#[doc = "Load multiple 4-element structures to four registers"]
12432#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"]
12433#[doc = "## Safety"]
12434#[doc = "  * Neon intrinsic unsafe"]
12435#[inline(always)]
12436#[target_feature(enable = "neon,aes")]
12437#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12438#[rustc_legacy_const_generics(2)]
12439#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12440pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t {
12441    static_assert!(LANE == 0);
12442    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
12443}
12444#[doc = "Load multiple 4-element structures to four registers"]
12445#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"]
12446#[doc = "## Safety"]
12447#[doc = "  * Neon intrinsic unsafe"]
12448#[inline(always)]
12449#[target_feature(enable = "neon")]
12450#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12451#[rustc_legacy_const_generics(2)]
12452#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12453pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t {
12454    static_assert!(LANE == 0);
12455    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
12456}
12457#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
12458#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
12459#[doc = "## Safety"]
12460#[doc = "  * Neon intrinsic unsafe"]
12461#[inline(always)]
12462#[cfg(target_endian = "little")]
12463#[target_feature(enable = "neon,aes")]
12464#[cfg_attr(test, assert_instr(ld4r))]
12465#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12466pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
12467    transmute(vld4q_dup_s64(transmute(a)))
12468}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    // Big-endian: reverse the two lanes of each result vector so lane
    // numbering matches the little-endian semantics of the public intrinsic.
    let mut ret_val: poly64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    // Little-endian: delegate to the signed s64 variant and reinterpret.
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    // Big-endian variant: load via the signed s64 variant, then swap the two
    // lanes of each of the four result vectors back into the expected order.
    let mut ret_val: uint64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t {
    // Shared macro emits the deinterleaving load: element type f64,
    // 2 lanes per vector, 4 vectors.
    crate::core_arch::macros::deinterleaving_load!(f64, 2, 4, a)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t {
    // Shared macro emits the deinterleaving load: element type i64,
    // 2 lanes per vector, 4 vectors.
    crate::core_arch::macros::deinterleaving_load!(i64, 2, 4, a)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -> float64x2x4_t {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Direct binding to the LLVM ld4lane intrinsic for v2f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0"
        )]
        fn _vld4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x4_t;
    }
    _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
12564#[doc = "Load multiple 4-element structures to four registers"]
12565#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"]
12566#[doc = "## Safety"]
12567#[doc = "  * Neon intrinsic unsafe"]
12568#[inline(always)]
12569#[target_feature(enable = "neon")]
12570#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12571#[rustc_legacy_const_generics(2)]
12572#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12573pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> int8x16x4_t {
12574    static_assert_uimm_bits!(LANE, 3);
12575    unsafe extern "unadjusted" {
12576        #[cfg_attr(
12577            any(target_arch = "aarch64", target_arch = "arm64ec"),
12578            link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0"
12579        )]
12580        fn _vld4q_lane_s8(
12581            a: int8x16_t,
12582            b: int8x16_t,
12583            c: int8x16_t,
12584            d: int8x16_t,
12585            n: i64,
12586            ptr: *const i8,
12587        ) -> int8x16x4_t;
12588    }
12589    _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
12590}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) -> int64x2x4_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Direct binding to the LLVM ld4lane intrinsic for v2i64; the u64/p64
    // wrappers delegate here.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0"
        )]
        fn _vld4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x4_t;
    }
    _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t {
    // 2-lane vectors: LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Reinterpret pointer and vectors and delegate to the signed s64 variant.
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t {
    // 16-lane vectors: LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // Reinterpret pointer and vectors and delegate to the signed s8 variant.
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t {
    // 2-lane vectors: LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Reinterpret pointer and vectors and delegate to the signed s64 variant.
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t {
    // 16-lane vectors: LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // Reinterpret pointer and vectors and delegate to the signed s8 variant.
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    // Little-endian: delegate to the signed s64 variant and reinterpret.
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    // Big-endian variant: load via the signed s64 variant, then swap the two
    // lanes of each of the four result vectors back into the expected order.
    let mut ret_val: poly64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    // Reinterpret pointer and result and delegate to the signed s64 variant.
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) -> int64x1_t {
    // One-lane vector: lane 0 is the only valid index.
    static_assert!(LANE == 0);
    // Perform an acquire-ordered atomic load of the element and insert it
    // into lane LANE of `src`.
    let atomic_src = crate::sync::atomic::AtomicI64::from_ptr(ptr as *mut i64);
    simd_insert!(
        src,
        LANE as u32,
        atomic_src.load(crate::sync::atomic::Ordering::Acquire)
    )
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) -> int64x2_t {
    // 2-lane vector: LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Perform an acquire-ordered atomic load of the element and insert it
    // into lane LANE of `src`.
    let atomic_src = crate::sync::atomic::AtomicI64::from_ptr(ptr as *mut i64);
    simd_insert!(
        src,
        LANE as u32,
        atomic_src.load(crate::sync::atomic::Ordering::Acquire)
    )
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_f64<const LANE: i32>(ptr: *const f64, src: float64x2_t) -> float64x2_t {
    // 2-lane vector: LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Delegate to the s64 variant (acquire-ordered atomic load) and
    // reinterpret the result as f64 lanes.
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) -> uint64x1_t {
    // One-lane vector: lane 0 is the only valid index.
    static_assert!(LANE == 0);
    // Delegate to the s64 variant (acquire-ordered atomic load) and
    // reinterpret the result as u64 lanes.
    transmute(vldap1_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t) -> uint64x2_t {
    // 2-lane vector: LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Delegate to the s64 variant (acquire-ordered atomic load) and
    // reinterpret the result as u64 lanes.
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) -> poly64x1_t {
    // One-lane vector: lane 0 is the only valid index.
    static_assert!(LANE == 0);
    // Delegate to the s64 variant (acquire-ordered atomic load) and
    // reinterpret the result as poly64 lanes.
    transmute(vldap1_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x2_t) -> poly64x2_t {
    // 2-lane vector: LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Delegate to the s64 variant (acquire-ordered atomic load) and
    // reinterpret the result as poly64 lanes.
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_f16<const INDEX: i32>(a: float16x4_t, b: uint8x8_t) -> float16x8_t {
    // 64-bit index vector with 16-bit elements: INDEX selects one of 4 segments.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Reinterpret the f16 table as s16 and delegate to the signed variant.
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_f16<const INDEX: i32>(a: float16x8_t, b: uint8x8_t) -> float16x8_t {
    // INDEX selects one of 4 segments of the 64-bit index vector.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Reinterpret the f16 table as s16 and delegate to the signed variant.
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u8<const INDEX: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t {
    // INDEX selects one of 2 segments (same bound as the s8 variant below).
    static_assert!(INDEX >= 0 && INDEX <= 1);
    // Reinterpret the u8 table as s8 and delegate to the signed variant.
    transmute(vluti2_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u8<const INDEX: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    // INDEX selects one of 2 segments (same bound as the s8 variant below).
    static_assert!(INDEX >= 0 && INDEX <= 1);
    // Reinterpret the u8 table as s8 and delegate to the signed variant.
    transmute(vluti2q_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u16<const INDEX: i32>(a: uint16x4_t, b: uint8x8_t) -> uint16x8_t {
    // INDEX selects one of 4 segments (same bound as the s16 variant below).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Reinterpret the u16 table as s16 and delegate to the signed variant.
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u16<const INDEX: i32>(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
    // INDEX selects one of 4 segments (same bound as the s16 variant below).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Reinterpret the u16 table as s16 and delegate to the signed variant.
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p8<const INDEX: i32>(a: poly8x8_t, b: uint8x8_t) -> poly8x16_t {
    // INDEX selects one of 2 segments (same bound as the s8 variant below).
    static_assert!(INDEX >= 0 && INDEX <= 1);
    // Reinterpret the poly8 table as s8 and delegate to the signed variant.
    transmute(vluti2_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p8<const INDEX: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    // INDEX selects one of 2 segments (same bound as the s8 variant below).
    static_assert!(INDEX >= 0 && INDEX <= 1);
    // Reinterpret the poly8 table as s8 and delegate to the signed variant.
    transmute(vluti2q_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p16<const INDEX: i32>(a: poly16x4_t, b: uint8x8_t) -> poly16x8_t {
    // INDEX selects one of 4 segments (same bound as the s16 variant below).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Reinterpret the poly16 table as s16 and delegate to the signed variant.
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p16<const INDEX: i32>(a: poly16x8_t, b: uint8x8_t) -> poly16x8_t {
    // INDEX selects one of 4 segments (same bound as the s16 variant below).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Reinterpret the poly16 table as s16 and delegate to the signed variant.
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s8<const LANE: i32>(a: int8x8_t, b: uint8x8_t) -> int8x16_t {
    // LANE selects one of 2 index segments; the unsigned/poly wrappers
    // delegate here with the same bound.
    static_assert!(LANE >= 0 && LANE <= 1);
    // Direct binding to the LLVM vluti2 lane intrinsic (v16i8 result, v8i8 table).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v8i8"
        )]
        fn _vluti2_lane_s8(a: int8x8_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    // LANE selects one of 2 index segments; the unsigned/poly wrappers
    // delegate here with the same bound.
    static_assert!(LANE >= 0 && LANE <= 1);
    // Direct binding to the LLVM vluti2 lane intrinsic (v16i8 result, v16i8 table).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v16i8"
        )]
        fn _vluti2q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s16<const LANE: i32>(a: int16x4_t, b: uint8x8_t) -> int16x8_t {
    // LANE selects one of 4 index segments; the f16/u16/p16 wrappers
    // delegate here with the same bound.
    static_assert!(LANE >= 0 && LANE <= 3);
    // Direct binding to the LLVM vluti2 lane intrinsic (v8i16 result, v4i16 table).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v4i16"
        )]
        fn _vluti2_lane_s16(a: int16x4_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s16<const LANE: i32>(a: int16x8_t, b: uint8x8_t) -> int16x8_t {
    // LANE selects one of 4 index segments; the f16/u16/p16 wrappers
    // delegate here with the same bound.
    static_assert!(LANE >= 0 && LANE <= 3);
    // Direct binding to the LLVM vluti2 lane intrinsic (v8i16 result, v8i16 table).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v8i16"
        )]
        fn _vluti2q_lane_s16(a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
    }
    _vluti2q_lane_s16(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_f16<const INDEX: i32>(a: float16x4_t, b: uint8x16_t) -> float16x8_t {
    // 128-bit index vector: INDEX selects one of 8 segments.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Reinterpret the f16 table as s16 and delegate to the signed variant.
    transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_f16<const INDEX: i32>(a: float16x8_t, b: uint8x16_t) -> float16x8_t {
    // 128-bit index vector: INDEX selects one of 8 segments.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Reinterpret the f16 table as s16 and delegate to the signed variant.
    transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_u8<const INDEX: i32>(a: uint8x8_t, b: uint8x16_t) -> uint8x16_t {
    // 128-bit index vector: INDEX selects one of 4 segments.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Reinterpret the u8 table as s8 and delegate to the signed variant.
    transmute(vluti2_laneq_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_u8<const INDEX: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // 128-bit index vector: INDEX selects one of 4 segments.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Reinterpret the u8 table as s8 and delegate to the signed variant.
    transmute(vluti2q_laneq_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_u16<const INDEX: i32>(a: uint16x4_t, b: uint8x16_t) -> uint16x8_t {
    // 128-bit index vector: INDEX selects one of 8 segments.
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Reinterpret the u16 table as s16 and delegate to the signed variant.
    transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_u16<const INDEX: i32>(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
    // Compile-time bound on the const index (valid values: 0..=7 for 16-bit elements).
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Bit-reinterpret to the signed variant and back; same bit pattern, new type.
    transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_p8<const INDEX: i32>(a: poly8x8_t, b: uint8x16_t) -> poly8x16_t {
    // Compile-time bound on the const index (valid values: 0..=3).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Polynomial form: reinterpret bits to the signed variant and back.
    transmute(vluti2_laneq_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_p8<const INDEX: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    // Compile-time bound on the const index (valid values: 0..=3).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Polynomial form: reinterpret bits to the signed variant and back.
    transmute(vluti2q_laneq_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_p16<const INDEX: i32>(a: poly16x4_t, b: uint8x16_t) -> poly16x8_t {
    // Compile-time bound on the const index (valid values: 0..=7 for 16-bit elements).
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Polynomial form: reinterpret bits to the signed variant and back.
    transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_p16<const INDEX: i32>(a: poly16x8_t, b: uint8x16_t) -> poly16x8_t {
    // Compile-time bound on the const index (valid values: 0..=7 for 16-bit elements).
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Polynomial form: reinterpret bits to the signed variant and back.
    transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_s8<const INDEX: i32>(a: int8x8_t, b: uint8x16_t) -> int8x16_t {
    // Compile-time bound on the const index (valid values: 0..=3).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Raw binding to the LLVM intrinsic; resolved via `link_name` on AArch64/Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v16i8.v8i8"
        )]
        fn _vluti2_laneq_s8(a: int8x8_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    // The const generic is forwarded to the intrinsic as a plain i32 argument.
    _vluti2_laneq_s8(a, b, INDEX)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_s8<const INDEX: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // Compile-time bound on the const index (valid values: 0..=3).
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Raw binding to the LLVM intrinsic; resolved via `link_name` on AArch64/Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v16i8.v16i8"
        )]
        fn _vluti2q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    // The const generic is forwarded to the intrinsic as a plain i32 argument.
    _vluti2q_laneq_s8(a, b, INDEX)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_laneq_s16<const INDEX: i32>(a: int16x4_t, b: uint8x16_t) -> int16x8_t {
    // Compile-time bound on the const index (valid values: 0..=7 for 16-bit elements).
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Raw binding to the LLVM intrinsic; resolved via `link_name` on AArch64/Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v8i16.v4i16"
        )]
        fn _vluti2_laneq_s16(a: int16x4_t, b: uint8x16_t, n: i32) -> int16x8_t;
    }
    // The const generic is forwarded to the intrinsic as a plain i32 argument.
    _vluti2_laneq_s16(a, b, INDEX)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_laneq_s16<const INDEX: i32>(a: int16x8_t, b: uint8x16_t) -> int16x8_t {
    // Compile-time bound on the const index (valid values: 0..=7 for 16-bit elements).
    static_assert!(INDEX >= 0 && INDEX <= 7);
    // Raw binding to the LLVM intrinsic; resolved via `link_name` on AArch64/Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.laneq.v8i16.v8i16"
        )]
        fn _vluti2q_laneq_s16(a: int16x8_t, b: uint8x16_t, n: i32) -> int16x8_t;
    }
    // The const generic is forwarded to the intrinsic as a plain i32 argument.
    _vluti2q_laneq_s16(a, b, INDEX)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_f16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_f16_x2<const LANE: i32>(a: float16x8x2_t, b: uint8x8_t) -> float16x8_t {
    // Compile-time bound on the const lane (valid values: 0..=1 for a 64-bit `b`).
    static_assert!(LANE >= 0 && LANE <= 1);
    // f16 form: reinterpret the x2 table as signed i16 data, delegate, reinterpret back.
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x8_t) -> uint16x8_t {
    // Compile-time bound on the const lane (valid values: 0..=1 for a 64-bit `b`).
    static_assert!(LANE >= 0 && LANE <= 1);
    // Unsigned form: bit-reinterpret to the signed variant and back.
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x8_t) -> poly16x8_t {
    // Compile-time bound on the const lane (valid values: 0..=1 for a 64-bit `b`).
    static_assert!(LANE >= 0 && LANE <= 1);
    // Polynomial form: bit-reinterpret to the signed variant and back.
    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
}
13277#[doc = "Lookup table read with 4-bit indices"]
13278#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s16_x2)"]
13279#[doc = "## Safety"]
13280#[doc = "  * Neon intrinsic unsafe"]
13281#[inline(always)]
13282#[target_feature(enable = "neon,lut")]
13283#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13284#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13285#[rustc_legacy_const_generics(2)]
13286pub unsafe fn vluti4q_lane_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x8_t) -> int16x8_t {
13287    static_assert!(LANE >= 0 && LANE <= 1);
13288    unsafe extern "unadjusted" {
13289        #[cfg_attr(
13290            any(target_arch = "aarch64", target_arch = "arm64ec"),
13291            link_name = "llvm.aarch64.neon.vluti4q.lane.x2.v8i16"
13292        )]
13293        fn _vluti4q_lane_s16_x2(a: int16x8_t, a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
13294    }
13295    _vluti4q_lane_s16_x2(a.0, a.1, b, LANE)
13296}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    // Only lane 0 is valid for this variant; enforced at compile time.
    static_assert!(LANE == 0);
    // Raw binding to the LLVM intrinsic; resolved via `link_name` on AArch64/Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.lane.v8i8"
        )]
        fn _vluti4q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    // The const generic is forwarded to the intrinsic as a plain i32 argument.
    _vluti4q_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    // Only lane 0 is valid for this variant; enforced at compile time.
    static_assert!(LANE == 0);
    // Unsigned form: bit-reinterpret to the signed variant and back.
    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    // Only lane 0 is valid for this variant; enforced at compile time.
    static_assert!(LANE == 0);
    // Polynomial form: bit-reinterpret to the signed variant and back.
    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_f16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut,fp16")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_f16_x2<const LANE: i32>(
    a: float16x8x2_t,
    b: uint8x16_t,
) -> float16x8_t {
    // Compile-time bound on the const lane (valid values: 0..=3 for a 128-bit `b`).
    static_assert!(LANE >= 0 && LANE <= 3);
    // f16 form: reinterpret the x2 table as signed i16 data, delegate, reinterpret back.
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x16_t) -> uint16x8_t {
    // Compile-time bound on the const lane (valid values: 0..=3 for a 128-bit `b`).
    static_assert!(LANE >= 0 && LANE <= 3);
    // Unsigned form: bit-reinterpret to the signed variant and back.
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x16_t) -> poly16x8_t {
    // Compile-time bound on the const lane (valid values: 0..=3 for a 128-bit `b`).
    static_assert!(LANE >= 0 && LANE <= 3);
    // Polynomial form: bit-reinterpret to the signed variant and back.
    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s16_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 3))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x16_t) -> int16x8_t {
    // Compile-time bound on the const lane (valid values: 0..=3 for a 128-bit `b`).
    static_assert!(LANE >= 0 && LANE <= 3);
    // Raw binding to the LLVM intrinsic; resolved via `link_name` on AArch64/Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.x2.v8i16"
        )]
        fn _vluti4q_laneq_s16_x2(a: int16x8_t, b: int16x8_t, c: uint8x16_t, n: i32) -> int16x8_t;
    }
    // The two halves of the x2 table are passed as separate arguments; LANE
    // is forwarded as a plain i32.
    _vluti4q_laneq_s16_x2(a.0, a.1, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_s8<const LANE: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // Compile-time bound on the const lane (valid values: 0..=1).
    static_assert!(LANE >= 0 && LANE <= 1);
    // Raw binding to the LLVM intrinsic; resolved via `link_name` on AArch64/Arm64EC.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti4q.laneq.v16i8"
        )]
        fn _vluti4q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
    }
    // The const generic is forwarded to the intrinsic as a plain i32 argument.
    _vluti4q_laneq_s8(a, b, LANE)
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_u8<const LANE: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Compile-time bound on the const lane (valid values: 0..=1).
    static_assert!(LANE >= 0 && LANE <= 1);
    // Unsigned form: bit-reinterpret to the signed variant and back.
    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
}
#[doc = "Lookup table read with 4-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti4q_laneq_p8<const LANE: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    // Compile-time bound on the const lane (valid values: 0..=1).
    static_assert!(LANE >= 0 && LANE <= 1);
    // Polynomial form: bit-reinterpret to the signed variant and back.
    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Declaration of the LLVM intrinsic this wrapper lowers to (`fmax`, 1 x f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v1f64"
        )]
        fn _vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // Forward both operands unchanged to the intrinsic.
    unsafe { _vmax_f64(a, b) }
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Declaration of the LLVM intrinsic this wrapper lowers to (`fmax`, 2 x f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.v2f64"
        )]
        fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // Forward both operands unchanged to the intrinsic.
    unsafe { _vmaxq_f64(a, b) }
}
#[doc = "Maximum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmax))]
pub fn vmaxh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision variant; compiled out on arm64ec (see `cfg` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmax.f16"
        )]
        fn _vmaxh_f16(a: f16, b: f16) -> f16;
    }
    // Forward both operands unchanged to the intrinsic.
    unsafe { _vmaxh_f16(a, b) }
}
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Declaration of the LLVM intrinsic this wrapper lowers to (`fmaxnm`, 1 x f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnm.v1f64"
        )]
        fn _vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // Forward both operands unchanged to the intrinsic.
    unsafe { _vmaxnm_f64(a, b) }
}
#[doc = "Floating-point Maximum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Declaration of the LLVM intrinsic this wrapper lowers to (`fmaxnm`, 2 x f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnm.v2f64"
        )]
        fn _vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // Forward both operands unchanged to the intrinsic.
    unsafe { _vmaxnmq_f64(a, b) }
}
#[doc = "Floating-point Maximum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision variant; compiled out on arm64ec (see `cfg` above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnm.f16"
        )]
        fn _vmaxnmh_f16(a: f16, b: f16) -> f16;
    }
    // Forward both operands unchanged to the intrinsic.
    unsafe { _vmaxnmh_f16(a, b) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmv_f16(a: float16x4_t) -> f16 {
    // Horizontal reduction: 4 x f16 in, one scalar f16 out.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f16.v4f16"
        )]
        fn _vmaxnmv_f16(a: float16x4_t) -> f16;
    }
    // Forward the vector unchanged to the intrinsic.
    unsafe { _vmaxnmv_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f16(a: float16x8_t) -> f16 {
    // Horizontal reduction: 8 x f16 in, one scalar f16 out.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f16.v8f16"
        )]
        fn _vmaxnmvq_f16(a: float16x8_t) -> f16;
    }
    // Forward the vector unchanged to the intrinsic.
    unsafe { _vmaxnmvq_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmv_f32(a: float32x2_t) -> f32 {
    // 2-lane reduction; per `assert_instr` above it is expected to lower to
    // the pairwise `fmaxnmp` rather than `fmaxnmv`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
        )]
        fn _vmaxnmv_f32(a: float32x2_t) -> f32;
    }
    // Forward the vector unchanged to the intrinsic.
    unsafe { _vmaxnmv_f32(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vmaxnmvq_f64(a: float64x2_t) -> f64 {
    // 2-lane reduction; per `assert_instr` above it is expected to lower to
    // the pairwise `fmaxnmp` rather than `fmaxnmv`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
        )]
        fn _vmaxnmvq_f64(a: float64x2_t) -> f64;
    }
    // Forward the vector unchanged to the intrinsic.
    unsafe { _vmaxnmvq_f64(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn vmaxnmvq_f32(a: float32x4_t) -> f32 {
    // Horizontal reduction: 4 x f32 in, one scalar f32 out.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v4f32"
        )]
        fn _vmaxnmvq_f32(a: float32x4_t) -> f32;
    }
    // Forward the vector unchanged to the intrinsic.
    unsafe { _vmaxnmvq_f32(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxv_f16(a: float16x4_t) -> f16 {
    // Horizontal reduction: 4 x f16 in, one scalar f16 out.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v4f16"
        )]
        fn _vmaxv_f16(a: float16x4_t) -> f16;
    }
    // Forward the vector unchanged to the intrinsic.
    unsafe { _vmaxv_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f16(a: float16x8_t) -> f16 {
    // Horizontal reduction: 8 x f16 in, one scalar f16 out.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v8f16"
        )]
        fn _vmaxvq_f16(a: float16x8_t) -> f16;
    }
    // Forward the vector unchanged to the intrinsic.
    unsafe { _vmaxvq_f16(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxv_f32(a: float32x2_t) -> f32 {
    // 2-lane reduction; per `assert_instr` above it is expected to lower to
    // the pairwise `fmaxp` rather than `fmaxv`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vmaxv_f32(a: float32x2_t) -> f32;
    }
    // Forward the vector unchanged to the intrinsic.
    unsafe { _vmaxv_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f32(a: float32x4_t) -> f32 {
    // Horizontal reduction: 4 x f32 in, one scalar f32 out.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32"
        )]
        fn _vmaxvq_f32(a: float32x4_t) -> f32;
    }
    // Forward the vector unchanged to the intrinsic.
    unsafe { _vmaxvq_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxvq_f64(a: float64x2_t) -> f64 {
    // 2-lane reduction; per `assert_instr` above it is expected to lower to
    // the pairwise `fmaxp` rather than `fmaxv`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vmaxvq_f64(a: float64x2_t) -> f64;
    }
    // Forward the vector unchanged to the intrinsic.
    unsafe { _vmaxvq_f64(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s8(a: int8x8_t) -> i8 {
    // Lane-wise max reduction via the portable SIMD intrinsic; expected to
    // lower to `smaxv` per the `assert_instr` attribute.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s8(a: int8x16_t) -> i8 {
    // Lane-wise max reduction via the portable SIMD intrinsic; expected to
    // lower to `smaxv` per the `assert_instr` attribute.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s16(a: int16x4_t) -> i16 {
    // Lane-wise max reduction via the portable SIMD intrinsic; expected to
    // lower to `smaxv` per the `assert_instr` attribute.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s16(a: int16x8_t) -> i16 {
    // Lane-wise max reduction via the portable SIMD intrinsic; expected to
    // lower to `smaxv` per the `assert_instr` attribute.
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vmaxv_s32(a: int32x2_t) -> i32 {
    // 2-lane reduction via the portable SIMD intrinsic; expected to lower to
    // the pairwise `smaxp` per the `assert_instr` attribute.
    unsafe { simd_reduce_max(a) }
}
13758#[doc = "Horizontal vector max."]
13759#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"]
13760#[inline(always)]
13761#[target_feature(enable = "neon")]
13762#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13763#[cfg_attr(test, assert_instr(smaxv))]
13764pub fn vmaxvq_s32(a: int32x4_t) -> i32 {
13765    unsafe { simd_reduce_max(a) }
13766}
13767#[doc = "Horizontal vector max."]
13768#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"]
13769#[inline(always)]
13770#[target_feature(enable = "neon")]
13771#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13772#[cfg_attr(test, assert_instr(umaxv))]
13773pub fn vmaxv_u8(a: uint8x8_t) -> u8 {
13774    unsafe { simd_reduce_max(a) }
13775}
13776#[doc = "Horizontal vector max."]
13777#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"]
13778#[inline(always)]
13779#[target_feature(enable = "neon")]
13780#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13781#[cfg_attr(test, assert_instr(umaxv))]
13782pub fn vmaxvq_u8(a: uint8x16_t) -> u8 {
13783    unsafe { simd_reduce_max(a) }
13784}
13785#[doc = "Horizontal vector max."]
13786#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"]
13787#[inline(always)]
13788#[target_feature(enable = "neon")]
13789#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13790#[cfg_attr(test, assert_instr(umaxv))]
13791pub fn vmaxv_u16(a: uint16x4_t) -> u16 {
13792    unsafe { simd_reduce_max(a) }
13793}
13794#[doc = "Horizontal vector max."]
13795#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"]
13796#[inline(always)]
13797#[target_feature(enable = "neon")]
13798#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13799#[cfg_attr(test, assert_instr(umaxv))]
13800pub fn vmaxvq_u16(a: uint16x8_t) -> u16 {
13801    unsafe { simd_reduce_max(a) }
13802}
13803#[doc = "Horizontal vector max."]
13804#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"]
13805#[inline(always)]
13806#[target_feature(enable = "neon")]
13807#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13808#[cfg_attr(test, assert_instr(umaxp))]
13809pub fn vmaxv_u32(a: uint32x2_t) -> u32 {
13810    unsafe { simd_reduce_max(a) }
13811}
13812#[doc = "Horizontal vector max."]
13813#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"]
13814#[inline(always)]
13815#[target_feature(enable = "neon")]
13816#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13817#[cfg_attr(test, assert_instr(umaxv))]
13818pub fn vmaxvq_u32(a: uint32x4_t) -> u32 {
13819    unsafe { simd_reduce_max(a) }
13820}
// Element-wise / scalar floating-point minimum, bound directly to LLVM's
// aarch64 NEON intrinsics via the `"unadjusted"` FFI ABI (argument passing
// matches the LLVM intrinsic signature exactly, with no Rust ABI
// adjustment).
// FMIN vs FMINNM: per the Arm A64 ISA, `fmin` returns the default NaN when
// either input is NaN, while the `fminnm` ("minimum number") variants
// return the numeric operand when exactly one input is a quiet NaN
// (IEEE 754 minNum semantics).
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v1f64"
        )]
        fn _vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmin_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v2f64"
        )]
        fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vminq_f64(a, b) }
}
// Scalar f16 minimum; gated off on arm64ec, which lacks fp16 support here.
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.f16"
        )]
        fn _vminh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vminh_f16(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnm.v1f64"
        )]
        fn _vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vminnm_f64(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnm.v2f64"
        )]
        fn _vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vminnmq_f64(a, b) }
}
#[doc = "Floating-point Minimum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmh_f16(a: f16, b: f16) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnm.f16"
        )]
        fn _vminnmh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vminnmh_f16(a, b) }
}
// Across-vector "minimum number" (FMINNMV) reductions: reduce all lanes to
// one scalar with minNum NaN handling. Each binds the LLVM intrinsic named
// for its element type and lane count.
// NOTE: the two-lane f32/f64 variants assert `fminnmp` (pairwise) — a
// two-lane reduction is emitted as a single pairwise instruction rather
// than an across-vector `fminnmv`.
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f16.v4f16"
        )]
        fn _vminnmv_f16(a: float16x4_t) -> f16;
    }
    unsafe { _vminnmv_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f16.v8f16"
        )]
        fn _vminnmvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vminnmvq_f16(a) }
}
// Two lanes only: a single pairwise `fminnmp` performs the reduction.
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
        )]
        fn _vminnmv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vminnmv_f32(a) }
}
// Two lanes only: a single pairwise `fminnmp` performs the reduction.
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
        )]
        fn _vminnmvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vminnmvq_f64(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v4f32"
        )]
        fn _vminnmvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vminnmvq_f32(a) }
}
// Across-vector minimum (FMINV) reductions for float vectors, bound to the
// `llvm.aarch64.neon.fminv.*` intrinsics.
// NOTE(review): the f16 pair's `#[doc]` text reads "minimum number", the
// same wording as the `vminnmv*` (FMINNMV) family above, yet they bind
// plain `fminv`. That looks like a copy-paste in the generator spec — if
// so, it should be corrected in `crates/stdarch-gen-arm/spec/`, not in
// this generated file. Verify against Arm's vminv_f16 documentation.
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v4f16"
        )]
        fn _vminv_f16(a: float16x4_t) -> f16;
    }
    unsafe { _vminv_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v8f16"
        )]
        fn _vminvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vminvq_f16(a) }
}
// Two lanes only: a single pairwise `fminp` performs the reduction.
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vminv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vminv_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v4f32"
        )]
        fn _vminvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vminvq_f32(a) }
}
// Two lanes only: a single pairwise `fminp` performs the reduction.
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vminvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vminvq_f64(a) }
}
// Integer horizontal-minimum reductions, mirroring the `vmaxv*` family:
// all lower to the generic `simd_reduce_min` compiler intrinsic.
// NOTE: the two-lane `_s32`/`_u32` variants assert `sminp`/`uminp`
// (pairwise min) — a reduction over two lanes is a single pairwise
// instruction, not an across-vector `sminv`/`uminv`.
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s8(a: int8x8_t) -> i8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s8(a: int8x16_t) -> i8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s16(a: int16x4_t) -> i16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s16(a: int16x8_t) -> i16 {
    unsafe { simd_reduce_min(a) }
}
// Two lanes only: compiles to a single pairwise `sminp`.
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vminv_s32(a: int32x2_t) -> i32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s32(a: int32x4_t) -> i32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u8(a: uint8x8_t) -> u8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u8(a: uint8x16_t) -> u8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u16(a: uint16x4_t) -> u16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u16(a: uint16x8_t) -> u16 {
    unsafe { simd_reduce_min(a) }
}
// Two lanes only: compiles to a single pairwise `uminp`.
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vminv_u32(a: uint32x2_t) -> u32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u32(a: uint32x4_t) -> u32 {
    unsafe { simd_reduce_min(a) }
}
// vmla is defined as a separate (non-fused) multiply followed by an add:
// `a + (b * c)`. Hence the test asserts an `fmul` is emitted — the fused
// form is a different intrinsic family (vfma).
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe { simd_add(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe { simd_add(a, simd_mul(b, c)) }
}
// By-lane widening multiply-add on the upper half: each function broadcasts
// lane `LANE` of `c` across a full-width vector via `simd_shuffle!` and
// delegates to the corresponding `vmlal_high_*` intrinsic.
// `static_assert_uimm_bits!(LANE, N)` rejects out-of-range lane indices at
// compile time (2 bits for 4-lane `c`, 3 bits for 8-lane, 1 bit for
// 2-lane). `LANE` is exposed positionally via
// `rustc_legacy_const_generics(3)` for C-style `intrinsic(a, b, c, LANE)`
// call syntax.
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmlal_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vmlal_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmlal_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmlal_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmlal_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vmlal_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vmlal_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vmlal_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
// Scalar-operand ("_n_") widening multiply-add on the upper half: the
// scalar `c` is splatted to a full vector with `vdupq_n_*`, then the work
// delegates to the corresponding `vmlal_high_*` intrinsic.
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vmlal_high_s16(a, b, vdupq_n_s16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vmlal_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    vmlal_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    vmlal_high_u32(a, b, vdupq_n_u32(c))
}
// "_high" widening multiply-add: `simd_shuffle!` extracts the upper half
// of each 128-bit input (indices select the top lanes), then the existing
// 64-bit-input `vmlal_*` intrinsic performs the widening multiply-add.
// This matches the SMLAL2/UMLAL2 instructions, which operate on the upper
// half of their source registers.
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_s8(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_s16(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlal_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlal_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlal_u32(a, b, c)
    }
}
#[doc = "Floating-point multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // Element-wise a - (b * c), as a separate multiply then subtract (note the
    // `fmul` instruction check above), i.e. NOT a fused multiply-subtract.
    unsafe { simd_sub(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Element-wise a - (b * c), as a separate multiply then subtract (note the
    // `fmul` instruction check above), i.e. NOT a fused multiply-subtract.
    unsafe { simd_sub(a, simd_mul(b, c)) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` across all 8 lanes, then multiply-subtract
    // against the upper half of `b` via the vector form.
    unsafe { vmlsl_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    // `c` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `c` across all 8 lanes, then multiply-subtract
    // against the upper half of `b` via the vector form.
    unsafe { vmlsl_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `c` across all 4 lanes, then multiply-subtract
    // against the upper half of `b` via the vector form.
    unsafe { vmlsl_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` across all 4 lanes, then multiply-subtract
    // against the upper half of `b` via the vector form.
    unsafe { vmlsl_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` across all 8 lanes, then multiply-subtract
    // against the upper half of `b` via the vector form.
    unsafe { vmlsl_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    // `c` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `c` across all 8 lanes, then multiply-subtract
    // against the upper half of `b` via the vector form.
    unsafe { vmlsl_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32; 8])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `c` across all 4 lanes, then multiply-subtract
    // against the upper half of `b` via the vector form.
    unsafe { vmlsl_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `c` across all 4 lanes, then multiply-subtract
    // against the upper half of `b` via the vector form.
    unsafe { vmlsl_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32; 4])) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Broadcast the scalar `c` to all 8 lanes and reuse the vector form.
    vmlsl_high_s16(a, b, vdupq_n_s16(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Broadcast the scalar `c` to all 4 lanes and reuse the vector form.
    vmlsl_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    // Broadcast the scalar `c` to all 8 lanes and reuse the vector form.
    vmlsl_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    // Broadcast the scalar `c` to all 4 lanes and reuse the vector form.
    vmlsl_high_u32(a, b, vdupq_n_u32(c))
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        // Select the upper halves (lanes 8..=15) of `b` and `c`, then reuse the
        // 64-bit-vector widening multiply-subtract: a - widen(b_hi) * widen(c_hi).
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlsl_s8(a, b, c)
    }
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        // Select the upper halves (lanes 4..=7) of `b` and `c`, then reuse the
        // 64-bit-vector widening multiply-subtract: a - widen(b_hi) * widen(c_hi).
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlsl_s16(a, b, c)
    }
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        // Select the upper halves (lanes 2..=3) of `b` and `c`, then reuse the
        // 64-bit-vector widening multiply-subtract: a - widen(b_hi) * widen(c_hi).
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlsl_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Select the upper halves (lanes 8..=15) of `b` and `c`, then reuse the
        // 64-bit-vector widening multiply-subtract: a - widen(b_hi) * widen(c_hi).
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmlsl_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Select the upper halves (lanes 4..=7) of `b` and `c`, then reuse the
        // 64-bit-vector widening multiply-subtract: a - widen(b_hi) * widen(c_hi).
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        vmlsl_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Select the upper halves (lanes 2..=3) of `b` and `c`, then reuse the
        // 64-bit-vector widening multiply-subtract: a - widen(b_hi) * widen(c_hi).
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        vmlsl_u32(a, b, c)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
    unsafe {
        // Select the upper half (lanes 8..=15), then sign-extend each lane
        // to 16 bits via the 64-bit-vector widening move.
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmovl_s8(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
    unsafe {
        // Select the upper half (lanes 4..=7), then sign-extend each lane
        // to 32 bits via the 64-bit-vector widening move.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vmovl_s16(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
    unsafe {
        // Select the upper half (lanes 2..=3), then sign-extend each lane
        // to 64 bits via the 64-bit-vector widening move.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vmovl_s32(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Select the upper half (lanes 8..=15), then zero-extend each lane
        // to 16 bits via the 64-bit-vector widening move.
        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmovl_u8(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Select the upper half (lanes 4..=7), then zero-extend each lane
        // to 32 bits via the 64-bit-vector widening move.
        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vmovl_u16(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Select the upper half (lanes 2..=3), then zero-extend each lane
        // to 64 bits via the 64-bit-vector widening move.
        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vmovl_u32(a)
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    unsafe {
        // Truncate each 16-bit lane of `b` to 8 bits, then concatenate `a`
        // (low half) with the narrowed `b` (high half) into one 128-bit vector.
        let c: int8x8_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    unsafe {
        // Truncate each 32-bit lane of `b` to 16 bits, then concatenate `a`
        // (low half) with the narrowed `b` (high half) into one 128-bit vector.
        let c: int16x4_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    unsafe {
        // Truncate each 64-bit lane of `b` to 32 bits, then concatenate `a`
        // (low half) with the narrowed `b` (high half) into one 128-bit vector.
        let c: int32x2_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    unsafe {
        // Truncate each 16-bit lane of `b` to 8 bits, then concatenate `a`
        // (low half) with the narrowed `b` (high half) into one 128-bit vector.
        let c: uint8x8_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    unsafe {
        // Truncate each 32-bit lane of `b` to 16 bits, then concatenate `a`
        // (low half) with the narrowed `b` (high half) into one 128-bit vector.
        let c: uint16x4_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    unsafe {
        // Truncate each 64-bit lane of `b` to 32 bits, then concatenate `a`
        // (low half) with the narrowed `b` (high half) into one 128-bit vector.
        let c: uint32x2_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3])
    }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Element-wise multiply (single f64 lane).
    unsafe { simd_mul(a, b) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Element-wise multiply over both f64 lanes.
    unsafe { simd_mul(a, b) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // `b` has only one lane, so LANE can only be 0.
    static_assert!(LANE == 0);
    // Extract the lane as an f64, re-wrap it as a 1-lane vector, and multiply.
    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmul_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `b` to 4 lanes and multiply element-wise with `a`.
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `b` to all 8 lanes and multiply element-wise with `a`.
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane as an f64, re-wrap it as a 1-lane vector, multiply.
    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t {
    // Broadcast the scalar `b` into a 1-lane vector and multiply element-wise.
    unsafe { simd_mul(a, vdup_n_f64(b)) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t {
    // Broadcast the scalar `b` to both lanes and multiply element-wise.
    unsafe { simd_mul(a, vdupq_n_f64(b)) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    // `b` has only one lane, so LANE can only be 0.
    static_assert!(LANE == 0);
    unsafe {
        // Scalar result: a * (lane LANE of b).
        let b: f64 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmulh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision multiply. (Doc summary corrected: the generated
    // text said "Add", but the body and the `fmul` check show a multiply.)
    a * b
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Scalar result: a * (lane LANE of b).
        let b: f16 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Scalar result: a * (lane LANE of b).
        let b: f16 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `b` to 8 lanes, then widening-multiply with the
    // upper half of `a` via the vector form.
    unsafe { vmull_high_s16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `b` to 8 lanes, then widening-multiply with the
    // upper half of `a` via the vector form.
    unsafe { vmull_high_s16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `b` to 4 lanes, then widening-multiply with the
    // upper half of `a` via the vector form.
    unsafe { vmull_high_s32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `b` to 4 lanes, then widening-multiply with the
    // upper half of `a` via the vector form.
    unsafe { vmull_high_s32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `b` to 8 lanes, then widening-multiply with the
    // upper half of `a` via the vector form.
    unsafe { vmull_high_u16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast lane LANE of `b` to 8 lanes, then widening-multiply with the
    // upper half of `a` via the vector form.
    unsafe { vmull_high_u16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast lane LANE of `b` to 4 lanes, then widening-multiply with the
    // upper half of `a` via the vector form.
    unsafe { vmull_high_u32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast lane LANE of `b` to 4 lanes, then widening-multiply with the
    // upper half of `a` via the vector form.
    unsafe { vmull_high_u32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    // Broadcast the scalar `b` to all 8 lanes and reuse the vector form.
    vmull_high_s16(a, vdupq_n_s16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    // Broadcast the scalar `b` to all 4 lanes and reuse the vector form.
    vmull_high_s32(a, vdupq_n_s32(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t {
    // Broadcast the scalar `b` to all 8 lanes and reuse the vector form.
    vmull_high_u16(a, vdupq_n_u16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t {
    // Broadcast the scalar `b` to all 4 lanes and reuse the vector form.
    vmull_high_u32(a, vdupq_n_u32(b))
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull2))]
pub fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 {
    // Extract the upper (index 1) 64-bit polynomial element from each operand
    // and perform the 64x64 -> 128-bit polynomial multiply.
    unsafe { vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) }
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull2))]
pub fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
    unsafe {
        // Select the upper halves (lanes 8..=15) of both operands, then reuse
        // the 64-bit-vector polynomial multiply long.
        let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_p8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Select the upper eight lanes (indices 8..=15) of each input, then
        // reuse the non-`high` widening multiply on those halves.
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_s8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Select the upper four lanes (indices 4..=7) of each input, then
        // reuse the non-`high` widening multiply on those halves.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_s16(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Select the upper two lanes (indices 2..=3) of each input, then
        // reuse the non-`high` widening multiply on those halves.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_s32(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Select the upper eight lanes (indices 8..=15) of each input, then
        // reuse the non-`high` widening multiply on those halves.
        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_u8(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Select the upper four lanes (indices 4..=7) of each input, then
        // reuse the non-`high` widening multiply on those halves.
        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_u16(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Select the upper two lanes (indices 2..=3) of each input, then
        // reuse the non-`high` widening multiply on those halves.
        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_u32(a, b)
    }
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull))]
pub fn vmull_p64(a: p64, b: p64) -> p128 {
    // Raw LLVM intrinsic binding; LLVM models the 128-bit result as a byte vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.pmull64"
        )]
        fn _vmull_p64(a: p64, b: p64) -> int8x16_t;
    }
    // SAFETY: same-width transmute — int8x16_t and p128 are both 128 bits;
    // the required `neon,aes` features are enabled via `#[target_feature]` above.
    unsafe { transmute(_vmull_p64(a, b)) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // `b` has a single lane, so only LANE == 0 is accepted (compile-time check).
    static_assert!(LANE == 0);
    // Broadcast the selected lane of `b` across both lanes, then multiply element-wise.
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // LANE must fit in 1 bit (0 or 1) — compile-time range check.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `b` across both lanes, then multiply element-wise.
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    // LANE must fit in 1 bit (0 or 1) — compile-time range check.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the selected lane and do a plain scalar multiply.
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    // LANE must fit in 2 bits (0..=3) — compile-time range check.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Extract the selected lane and do a plain scalar multiply.
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    // LANE must fit in 1 bit (0 or 1) — compile-time range check.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the selected lane and do a plain scalar multiply.
        let b: f64 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Thin wrapper over the LLVM FMULX intrinsic for 4 x f16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f16"
        )]
        fn _vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the required `neon,fp16` features are enabled via `#[target_feature]`.
    unsafe { _vmulx_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Thin wrapper over the LLVM FMULX intrinsic for 8 x f16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v8f16"
        )]
        fn _vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the required `neon,fp16` features are enabled via `#[target_feature]`.
    unsafe { _vmulxq_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Thin wrapper over the LLVM FMULX intrinsic for 2 x f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f32"
        )]
        fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the required `neon` feature is enabled via `#[target_feature]`.
    unsafe { _vmulx_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Thin wrapper over the LLVM FMULX intrinsic for 4 x f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f32"
        )]
        fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the required `neon` feature is enabled via `#[target_feature]`.
    unsafe { _vmulxq_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Thin wrapper over the LLVM FMULX intrinsic for 1 x f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v1f64"
        )]
        fn _vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the required `neon` feature is enabled via `#[target_feature]`.
    unsafe { _vmulx_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM FMULX intrinsic for 2 x f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f64"
        )]
        fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the required `neon` feature is enabled via `#[target_feature]`.
    unsafe { _vmulxq_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_lane_f16<const LANE: i32>(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // LANE must fit in 2 bits (0..=3) — compile-time range check.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` to all four lanes, then fmulx.
    unsafe { vmulx_f16(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
    // LANE must fit in 3 bits (0..=7) — compile-time range check.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of the 8-lane `b` to a 4-lane vector, then fmulx.
    unsafe { vmulx_f16(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_lane_f16<const LANE: i32>(a: float16x8_t, b: float16x4_t) -> float16x8_t {
    // LANE must fit in 2 bits (0..=3) — compile-time range check.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of the 4-lane `b` to an 8-lane vector, then fmulx.
    unsafe { vmulxq_f16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // LANE must fit in 3 bits (0..=7) — compile-time range check.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of `b` to all eight lanes, then fmulx.
    unsafe { vmulxq_f16(a, simd_shuffle!(b, b, [LANE as u32; 8])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // LANE must fit in 1 bit (0 or 1) — compile-time range check.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `b` to both lanes, then fmulx.
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
    // LANE must fit in 2 bits (0..=3) — compile-time range check.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of the 4-lane `b` to a 2-lane vector, then fmulx.
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
    // LANE must fit in 1 bit (0 or 1) — compile-time range check.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of the 2-lane `b` to a 4-lane vector, then fmulx.
    unsafe { vmulxq_f32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // LANE must fit in 2 bits (0..=3) — compile-time range check.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` to all four lanes, then fmulx.
    unsafe { vmulxq_f32(a, simd_shuffle!(b, b, [LANE as u32; 4])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // LANE must fit in 1 bit (0 or 1) — compile-time range check.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `b` to both lanes, then fmulx.
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // `b` has a single lane, so only LANE == 0 is accepted (compile-time check).
    static_assert!(LANE == 0);
    // Extract the lane as f64 and re-wrap it as a one-lane vector for the vector fmulx.
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    // LANE must fit in 1 bit (0 or 1) — compile-time range check.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the lane as f64 and re-wrap it as a one-lane vector for the vector fmulx.
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_n_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_n_f16(a: float16x4_t, b: f16) -> float16x4_t {
    // Broadcast the scalar `b` to every lane and fmulx element-wise.
    vmulx_f16(a, vdup_n_f16(b))
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_n_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_n_f16(a: float16x8_t, b: f16) -> float16x8_t {
    // Broadcast the scalar `b` to every lane and fmulx element-wise.
    vmulxq_f16(a, vdupq_n_f16(b))
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxd_f64(a: f64, b: f64) -> f64 {
    // Thin wrapper over the scalar LLVM FMULX intrinsic for f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f64"
        )]
        fn _vmulxd_f64(a: f64, b: f64) -> f64;
    }
    // SAFETY: the required `neon` feature is enabled via `#[target_feature]`.
    unsafe { _vmulxd_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxs_f32(a: f32, b: f32) -> f32 {
    // Thin wrapper over the scalar LLVM FMULX intrinsic for f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f32"
        )]
        fn _vmulxs_f32(a: f32, b: f32) -> f32;
    }
    // SAFETY: the required `neon` feature is enabled via `#[target_feature]`.
    unsafe { _vmulxs_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    // `b` has a single lane, so only LANE == 0 is accepted (compile-time check).
    static_assert!(LANE == 0);
    // Extract the lane and apply the scalar fmulx.
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    // LANE must fit in 1 bit (0 or 1) — compile-time range check.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane and apply the scalar fmulx.
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    // LANE must fit in 1 bit (0 or 1) — compile-time range check.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane and apply the scalar fmulx.
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    // LANE must fit in 2 bits (0..=3) — compile-time range check.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane and apply the scalar fmulx.
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxh_f16(a: f16, b: f16) -> f16 {
    // Thin wrapper over the scalar LLVM FMULX intrinsic for f16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f16"
        )]
        fn _vmulxh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: the required `neon,fp16` features are enabled via `#[target_feature]`.
    unsafe { _vmulxh_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    // LANE must fit in 2 bits (0..=3) — compile-time range check.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane and apply the scalar fmulx.
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    // LANE must fit in 3 bits (0..=7) — compile-time range check.
    static_assert_uimm_bits!(LANE, 3);
    // Extract the selected lane and apply the scalar fmulx.
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // `b` has a single lane, so only LANE == 0 is accepted (compile-time check).
    static_assert!(LANE == 0);
    // Broadcast that lane to both lanes of a 2-lane vector, then fmulx.
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32; 2])) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vneg_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise floating-point negation.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise floating-point negation.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vneg_s64(a: int64x1_t) -> int64x1_t {
    // Lane-wise integer negation.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vnegq_s64(a: int64x2_t) -> int64x2_t {
    // Lane-wise integer negation.
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vnegd_s64(a: i64) -> i64 {
    // wrapping_neg avoids the debug-mode overflow panic for i64::MIN
    // (which negates to itself under two's-complement wrapping).
    a.wrapping_neg()
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegh_f16(a: f16) -> f16 {
    // Plain scalar floating-point negation.
    -a
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vpaddd_f64(a: float64x2_t) -> f64 {
    unsafe {
        // Sum the two lanes of `a`: result = a[0] + a[1].
        let a1: f64 = simd_extract!(a, 0);
        let a2: f64 = simd_extract!(a, 1);
        a1 + a2
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vpadds_f32(a: float32x2_t) -> f32 {
    unsafe {
        // Sum the two lanes of `a`: result = a[0] + a[1].
        let a1: f32 = simd_extract!(a, 0);
        let a2: f32 = simd_extract!(a, 1);
        a1 + a2
    }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_s64(a: int64x2_t) -> i64 {
    // Ordered reduction add over both lanes, starting from 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
    // Ordered reduction add over both lanes, starting from 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe {
        // Deinterleave the concatenation of `a` and `b` into even- and
        // odd-indexed lanes, then add: result[i] = pair[2i] + pair[2i+1].
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>());
        simd_add(even, odd)
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe {
        // Deinterleave the concatenation of `a` and `b` into even- and
        // odd-indexed lanes, then add: result[i] = pair[2i] + pair[2i+1].
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>());
        simd_add(even, odd)
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Result lane i = concat(a, b)[2i] + concat(a, b)[2i+1]; the even/odd
    // shuffle + add pattern lowers to FADDP per assert_instr.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Result lane i = concat(a, b)[2i] + concat(a, b)[2i+1]; the even/odd
    // shuffle + add pattern lowers to ADDP per assert_instr.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<16>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<16>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Result lane i = concat(a, b)[2i] + concat(a, b)[2i+1]; the even/odd
    // shuffle + add pattern lowers to ADDP per assert_instr.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Result lane i = concat(a, b)[2i] + concat(a, b)[2i+1]; the even/odd
    // shuffle + add pattern lowers to ADDP per assert_instr.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Result lane i = concat(a, b)[2i] + concat(a, b)[2i+1]; the even/odd
    // shuffle + add pattern lowers to ADDP per assert_instr.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Result lane i = concat(a, b)[2i] + concat(a, b)[2i+1]; the even/odd
    // shuffle + add pattern lowers to ADDP per assert_instr.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<16>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<16>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Result lane i = concat(a, b)[2i] + concat(a, b)[2i+1]; the even/odd
    // shuffle + add pattern lowers to ADDP per assert_instr.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<8>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<8>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Result lane i = concat(a, b)[2i] + concat(a, b)[2i+1]; the even/odd
    // shuffle + add pattern lowers to ADDP per assert_instr.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<4>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<4>());
        simd_add(even, odd)
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Result lane i = concat(a, b)[2i] + concat(a, b)[2i+1]; the even/odd
    // shuffle + add pattern lowers to ADDP per assert_instr.
    unsafe {
        let even = simd_shuffle!(a, b, crate::core_arch::macros::even::<2>());
        let odd = simd_shuffle!(a, b, crate::core_arch::macros::odd::<2>());
        simd_add(even, odd)
    }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // NOTE: summary doc previously said "add pairwise" (copy-paste); this is
    // pairwise MAX — it delegates to llvm.aarch64.neon.fmaxp (FMAXP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v4f16"
        )]
        fn _vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vpmax_f16(a, b) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // NOTE: summary doc previously said "add pairwise" (copy-paste); this is
    // pairwise MAX — it delegates to llvm.aarch64.neon.fmaxp (FMAXP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v8f16"
        )]
        fn _vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpmaxq_f16(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // NOTE: summary doc previously said "add pairwise" (copy-paste); this is
    // pairwise maxNum — it delegates to llvm.aarch64.neon.fmaxnmp (FMAXNMP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v4f16"
        )]
        fn _vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vpmaxnm_f16(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // NOTE: summary doc previously said "add pairwise" (copy-paste); this is
    // pairwise maxNum — it delegates to llvm.aarch64.neon.fmaxnmp (FMAXNMP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v8f16"
        )]
        fn _vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpmaxnmq_f16(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Thin wrapper over the LLVM intrinsic; lowers to FMAXNMP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f32"
        )]
        fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vpmaxnm_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Thin wrapper over the LLVM intrinsic; lowers to FMAXNMP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v4f32"
        )]
        fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpmaxnmq_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM intrinsic; lowers to FMAXNMP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f64"
        )]
        fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpmaxnmq_f64(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
    // Scalar reduction of the two lanes via the LLVM fmaxnmv reduction
    // intrinsic; lowers to FMAXNMP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
        )]
        fn _vpmaxnmqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpmaxnmqd_f64(a) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnms_f32(a: float32x2_t) -> f32 {
    // Scalar reduction of the two lanes via the LLVM fmaxnmv reduction
    // intrinsic; lowers to FMAXNMP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
        )]
        fn _vpmaxnms_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpmaxnms_f32(a) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Thin wrapper over the LLVM intrinsic; lowers to FMAXP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v4f32"
        )]
        fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpmaxq_f32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM intrinsic; lowers to FMAXP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v2f64"
        )]
        fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpmaxq_f64(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Thin wrapper over the LLVM intrinsic; lowers to SMAXP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v16i8"
        )]
        fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    unsafe { _vpmaxq_s8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Thin wrapper over the LLVM intrinsic; lowers to SMAXP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v8i16"
        )]
        fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    unsafe { _vpmaxq_s16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Thin wrapper over the LLVM intrinsic; lowers to SMAXP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v4i32"
        )]
        fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    unsafe { _vpmaxq_s32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Thin wrapper over the LLVM intrinsic; lowers to UMAXP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v16i8"
        )]
        fn _vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _vpmaxq_u8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Thin wrapper over the LLVM intrinsic; lowers to UMAXP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v8i16"
        )]
        fn _vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _vpmaxq_u16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Thin wrapper over the LLVM intrinsic; lowers to UMAXP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v4i32"
        )]
        fn _vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    unsafe { _vpmaxq_u32(a, b) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxqd_f64(a: float64x2_t) -> f64 {
    // Scalar reduction of the two lanes via the LLVM fmaxv reduction
    // intrinsic; lowers to FMAXP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vpmaxqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpmaxqd_f64(a) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxs_f32(a: float32x2_t) -> f32 {
    // Scalar reduction of the two lanes via the LLVM fmaxv reduction
    // intrinsic; lowers to FMAXP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vpmaxs_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpmaxs_f32(a) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // NOTE: summary doc previously said "add pairwise" (copy-paste); this is
    // pairwise MIN — it delegates to llvm.aarch64.neon.fminp (FMINP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v4f16"
        )]
        fn _vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vpmin_f16(a, b) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // NOTE: summary doc previously said "add pairwise" (copy-paste); this is
    // pairwise MIN — it delegates to llvm.aarch64.neon.fminp (FMINP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v8f16"
        )]
        fn _vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpminq_f16(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // NOTE: summary doc previously said "add pairwise" (copy-paste); this is
    // pairwise minNum — it delegates to llvm.aarch64.neon.fminnmp (FMINNMP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v4f16"
        )]
        fn _vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vpminnm_f16(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // NOTE: summary doc previously said "add pairwise" (copy-paste); this is
    // pairwise minNum — it delegates to llvm.aarch64.neon.fminnmp (FMINNMP).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v8f16"
        )]
        fn _vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vpminnmq_f16(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Thin wrapper over the LLVM intrinsic; lowers to FMINNMP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f32"
        )]
        fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vpminnm_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Thin wrapper over the LLVM intrinsic; lowers to FMINNMP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v4f32"
        )]
        fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpminnmq_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM intrinsic; lowers to FMINNMP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f64"
        )]
        fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpminnmq_f64(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmqd_f64(a: float64x2_t) -> f64 {
    // Scalar reduction of the two lanes via the LLVM fminnmv reduction
    // intrinsic; lowers to FMINNMP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
        )]
        fn _vpminnmqd_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vpminnmqd_f64(a) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnms_f32(a: float32x2_t) -> f32 {
    // Scalar reduction of the two lanes via the LLVM fminnmv reduction
    // intrinsic; lowers to FMINNMP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
        )]
        fn _vpminnms_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vpminnms_f32(a) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Thin wrapper over the LLVM intrinsic; lowers to FMINP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v4f32"
        )]
        fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vpminq_f32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM intrinsic; lowers to FMINP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v2f64"
        )]
        fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vpminq_f64(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Thin wrapper over the LLVM intrinsic; lowers to SMINP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v16i8"
        )]
        fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    unsafe { _vpminq_s8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Thin wrapper over the LLVM intrinsic; lowers to SMINP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v8i16"
        )]
        fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    unsafe { _vpminq_s16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Thin wrapper over the LLVM intrinsic; lowers to SMINP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v4i32"
        )]
        fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    unsafe { _vpminq_s32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Thin wrapper over the LLVM intrinsic; lowers to UMINP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v16i8"
        )]
        fn _vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    unsafe { _vpminq_u8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Thin wrapper over the LLVM intrinsic; lowers to UMINP per assert_instr.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v8i16"
        )]
        fn _vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    unsafe { _vpminq_u16(a, b) }
}
16445#[doc = "Folding minimum of adjacent pairs"]
16446#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"]
16447#[inline(always)]
16448#[target_feature(enable = "neon")]
16449#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16450#[cfg_attr(test, assert_instr(uminp))]
16451pub fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
16452    unsafe extern "unadjusted" {
16453        #[cfg_attr(
16454            any(target_arch = "aarch64", target_arch = "arm64ec"),
16455            link_name = "llvm.aarch64.neon.uminp.v4i32"
16456        )]
16457        fn _vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
16458    }
16459    unsafe { _vpminq_u32(a, b) }
16460}
16461#[doc = "Floating-point minimum pairwise"]
16462#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"]
16463#[inline(always)]
16464#[target_feature(enable = "neon")]
16465#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16466#[cfg_attr(test, assert_instr(fminp))]
16467pub fn vpminqd_f64(a: float64x2_t) -> f64 {
16468    unsafe extern "unadjusted" {
16469        #[cfg_attr(
16470            any(target_arch = "aarch64", target_arch = "arm64ec"),
16471            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
16472        )]
16473        fn _vpminqd_f64(a: float64x2_t) -> f64;
16474    }
16475    unsafe { _vpminqd_f64(a) }
16476}
16477#[doc = "Floating-point minimum pairwise"]
16478#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"]
16479#[inline(always)]
16480#[target_feature(enable = "neon")]
16481#[stable(feature = "neon_intrinsics", since = "1.59.0")]
16482#[cfg_attr(test, assert_instr(fminp))]
16483pub fn vpmins_f32(a: float32x2_t) -> f32 {
16484    unsafe extern "unadjusted" {
16485        #[cfg_attr(
16486            any(target_arch = "aarch64", target_arch = "arm64ec"),
16487            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
16488        )]
16489        fn _vpmins_f32(a: float32x2_t) -> f32;
16490    }
16491    unsafe { _vpmins_f32(a) }
16492}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabs_s64(a: int64x1_t) -> int64x1_t {
    // Raw binding to the LLVM intrinsic that lowers to SQABS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v1i64"
        )]
        fn _vqabs_s64(a: int64x1_t) -> int64x1_t;
    }
    // SAFETY: declaration matches the LLVM intrinsic; NEON is enabled above.
    unsafe { _vqabs_s64(a) }
}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsq_s64(a: int64x2_t) -> int64x2_t {
    // Raw binding to the LLVM intrinsic that lowers to SQABS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v2i64"
        )]
        fn _vqabsq_s64(a: int64x2_t) -> int64x2_t;
    }
    // SAFETY: declaration matches the LLVM intrinsic; NEON is enabled above.
    unsafe { _vqabsq_s64(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsb_s8(a: i8) -> i8 {
    // Scalar form: broadcast, run the vector intrinsic, take lane 0.
    // SAFETY: lane index 0 is always in bounds for the 8-lane vector.
    unsafe { simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsh_s16(a: i16) -> i16 {
    // Scalar form: broadcast, run the vector intrinsic, take lane 0.
    // SAFETY: lane index 0 is always in bounds for the 4-lane vector.
    unsafe { simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabss_s32(a: i32) -> i32 {
    // Raw binding to the scalar (`i32`) LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i32"
        )]
        fn _vqabss_s32(a: i32) -> i32;
    }
    // SAFETY: declaration matches the LLVM intrinsic; NEON is enabled above.
    unsafe { _vqabss_s32(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsd_s64(a: i64) -> i64 {
    // Raw binding to the scalar (`i64`) LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i64"
        )]
        fn _vqabsd_s64(a: i64) -> i64;
    }
    // SAFETY: declaration matches the LLVM intrinsic; NEON is enabled above.
    unsafe { _vqabsd_s64(a) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddb_s8(a: i8, b: i8) -> i8 {
    // Scalar form: broadcast both operands, run the vector add, take lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    // SAFETY: lane index 0 is always in bounds for the 8-lane vector.
    unsafe { simd_extract!(vqadd_s8(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddh_s16(a: i16, b: i16) -> i16 {
    // Scalar form: broadcast both operands, run the vector add, take lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    // SAFETY: lane index 0 is always in bounds for the 4-lane vector.
    unsafe { simd_extract!(vqadd_s16(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddb_u8(a: u8, b: u8) -> u8 {
    // Scalar form: broadcast both operands, run the vector add, take lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    // SAFETY: lane index 0 is always in bounds for the 8-lane vector.
    unsafe { simd_extract!(vqadd_u8(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddh_u16(a: u16, b: u16) -> u16 {
    // Scalar form: broadcast both operands, run the vector add, take lane 0.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    // SAFETY: lane index 0 is always in bounds for the 4-lane vector.
    unsafe { simd_extract!(vqadd_u16(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqadds_s32(a: i32, b: i32) -> i32 {
    // Raw binding to the scalar (`i32`) LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i32"
        )]
        fn _vqadds_s32(a: i32, b: i32) -> i32;
    }
    // SAFETY: declaration matches the LLVM intrinsic; NEON is enabled above.
    unsafe { _vqadds_s32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddd_s64(a: i64, b: i64) -> i64 {
    // Raw binding to the scalar (`i64`) LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i64"
        )]
        fn _vqaddd_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: declaration matches the LLVM intrinsic; NEON is enabled above.
    unsafe { _vqaddd_s64(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqadds_u32(a: u32, b: u32) -> u32 {
    // Raw binding to the scalar (`i32`) LLVM intrinsic (unsigned variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i32"
        )]
        fn _vqadds_u32(a: u32, b: u32) -> u32;
    }
    // SAFETY: declaration matches the LLVM intrinsic; NEON is enabled above.
    unsafe { _vqadds_u32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddd_u64(a: u64, b: u64) -> u64 {
    // Raw binding to the scalar (`i64`) LLVM intrinsic (unsigned variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i64"
        )]
        fn _vqaddd_u64(a: u64, b: u64) -> u64;
    }
    // SAFETY: declaration matches the LLVM intrinsic; NEON is enabled above.
    unsafe { _vqaddd_u64(a, b) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // N must index one of the 4 lanes of `c` (2 bits); checked at compile time.
    static_assert_uimm_bits!(N, 2);
    // Doubling widening product of lane N, saturating-added into `a`.
    vqaddq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // N must index one of the 8 lanes of `c` (3 bits); checked at compile time.
    static_assert_uimm_bits!(N, 3);
    // Doubling widening product of lane N, saturating-added into `a`.
    vqaddq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // N must index one of the 2 lanes of `c` (1 bit); checked at compile time.
    static_assert_uimm_bits!(N, 1);
    // Doubling widening product of lane N, saturating-added into `a`.
    vqaddq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // N must index one of the 4 lanes of `c` (2 bits); checked at compile time.
    static_assert_uimm_bits!(N, 2);
    // Doubling widening product of lane N, saturating-added into `a`.
    vqaddq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Doubling widening product with scalar `c`, saturating-added into `a`.
    vqaddq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // Doubling widening product of the high halves, saturating-added into `a`.
    vqaddq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Doubling widening product with scalar `c`, saturating-added into `a`.
    vqaddq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // Doubling widening product of the high halves, saturating-added into `a`.
    vqaddq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    // N must index one of the 8 lanes of `c` (3 bits); checked at compile time.
    static_assert_uimm_bits!(N, 3);
    // Doubling widening product of lane N, saturating-added into `a`.
    vqaddq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    // N must index one of the 4 lanes of `c` (2 bits); checked at compile time.
    static_assert_uimm_bits!(N, 2);
    // Doubling widening product of lane N, saturating-added into `a`.
    vqaddq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: `LANE` is proven in range [0, 3] by the assert above.
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: `LANE` is proven in range [0, 7] by the assert above.
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: `LANE` is proven in range [0, 1] by the assert above.
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: `LANE` is proven in range [0, 3] by the assert above.
    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
    // Widening doubling product computed on broadcast vectors; lane 0 holds b*c*2.
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    // SAFETY: lane index 0 is always in bounds for the 4-lane vector.
    unsafe { vqadds_s32(a, simd_extract!(x, 0)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
    // Saturating add of the doubling widening product (generated form binds
    // the result before returning it).
    let x: i64 = vqaddd_s64(a, vqdmulls_s32(b, c));
    x
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // N must index one of the 4 lanes of `c` (2 bits); checked at compile time.
    static_assert_uimm_bits!(N, 2);
    // Doubling widening product of lane N, saturating-subtracted from `a`.
    vqsubq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // N must index one of the 8 lanes of `c` (3 bits); checked at compile time.
    static_assert_uimm_bits!(N, 3);
    // Doubling widening product of lane N, saturating-subtracted from `a`.
    vqsubq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // N must index one of the 2 lanes of `c` (1 bit); checked at compile time.
    static_assert_uimm_bits!(N, 1);
    // Doubling widening product of lane N, saturating-subtracted from `a`.
    vqsubq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // N must index one of the 4 lanes of `c` (2 bits); checked at compile time.
    static_assert_uimm_bits!(N, 2);
    // Doubling widening product of lane N, saturating-subtracted from `a`.
    vqsubq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Doubling widening product with scalar `c`, saturating-subtracted from `a`.
    vqsubq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // Doubling widening product of the high halves, saturating-subtracted from `a`.
    vqsubq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Doubling widening product with scalar `c`, saturating-subtracted from `a`.
    vqsubq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // Doubling widening product of the high halves, saturating-subtracted from `a`.
    vqsubq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    // N must index one of the 8 lanes of `c` (3 bits); checked at compile time.
    static_assert_uimm_bits!(N, 3);
    // Doubling widening product of lane N, saturating-subtracted from `a`.
    vqsubq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    // N must index one of the 4 lanes of `c` (2 bits); checked at compile time.
    static_assert_uimm_bits!(N, 2);
    // Doubling widening product of lane N, saturating-subtracted from `a`.
    vqsubq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: `LANE` is proven in range [0, 3] by the assert above.
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: `LANE` is proven in range [0, 7] by the assert above.
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: `LANE` is proven in range [0, 1] by the assert above.
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: `LANE` is proven in range [0, 3] by the assert above.
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
    // Widening doubling product computed on broadcast vectors; lane 0 holds b*c*2.
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    // SAFETY: lane index 0 is always in bounds for the 4-lane vector.
    unsafe { vqsubs_s32(a, simd_extract!(x, 0)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
    // Saturating subtract of the doubling widening product (generated form
    // binds the result before returning it).
    let x: i64 = vqsubd_s64(a, vqdmulls_s32(b, c));
    x
}
17015#[doc = "Vector saturating doubling multiply high by scalar"]
17016#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"]
17017#[inline(always)]
17018#[target_feature(enable = "neon")]
17019#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
17020#[rustc_legacy_const_generics(2)]
17021#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17022pub fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
17023    static_assert_uimm_bits!(LANE, 2);
17024    unsafe { vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) }
17025}
17026#[doc = "Vector saturating doubling multiply high by scalar"]
17027#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"]
17028#[inline(always)]
17029#[target_feature(enable = "neon")]
17030#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
17031#[rustc_legacy_const_generics(2)]
17032#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17033pub fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
17034    static_assert_uimm_bits!(LANE, 2);
17035    unsafe { vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) }
17036}
17037#[doc = "Vector saturating doubling multiply high by scalar"]
17038#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"]
17039#[inline(always)]
17040#[target_feature(enable = "neon")]
17041#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
17042#[rustc_legacy_const_generics(2)]
17043#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17044pub fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
17045    static_assert_uimm_bits!(LANE, 1);
17046    unsafe { vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) }
17047}
17048#[doc = "Vector saturating doubling multiply high by scalar"]
17049#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"]
17050#[inline(always)]
17051#[target_feature(enable = "neon")]
17052#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
17053#[rustc_legacy_const_generics(2)]
17054#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17055pub fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
17056    static_assert_uimm_bits!(LANE, 1);
17057    unsafe { vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) }
17058}
17059#[doc = "Signed saturating doubling multiply returning high half"]
17060#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"]
17061#[inline(always)]
17062#[target_feature(enable = "neon")]
17063#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
17064#[rustc_legacy_const_generics(2)]
17065#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17066pub fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
17067    static_assert_uimm_bits!(N, 2);
17068    unsafe {
        // Pull lane N out of `b` and fall through to the scalar form.
17069        let b: i16 = simd_extract!(b, N as u32);
17070        vqdmulhh_s16(a, b)
17071    }
17072}
17073#[doc = "Signed saturating doubling multiply returning high half"]
17074#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"]
17075#[inline(always)]
17076#[target_feature(enable = "neon")]
17077#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
17078#[rustc_legacy_const_generics(2)]
17079#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17080pub fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
17081    static_assert_uimm_bits!(N, 3);
17082    unsafe {
        // Pull lane N out of the 8-lane `b` and fall through to the scalar form.
17083        let b: i16 = simd_extract!(b, N as u32);
17084        vqdmulhh_s16(a, b)
17085    }
17086}
17087#[doc = "Signed saturating doubling multiply returning high half"]
17088#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"]
17089#[inline(always)]
17090#[target_feature(enable = "neon")]
17091#[cfg_attr(test, assert_instr(sqdmulh))]
17092#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17093pub fn vqdmulhh_s16(a: i16, b: i16) -> i16 {
    // No dedicated scalar binding here: broadcast both operands into 4-lane
    // vectors, run the vector SQDMULH, and take lane 0 of the result.
17094    let a: int16x4_t = vdup_n_s16(a);
17095    let b: int16x4_t = vdup_n_s16(b);
17096    unsafe { simd_extract!(vqdmulh_s16(a, b), 0) }
17097}
17098#[doc = "Signed saturating doubling multiply returning high half"]
17099#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"]
17100#[inline(always)]
17101#[target_feature(enable = "neon")]
17102#[cfg_attr(test, assert_instr(sqdmulh))]
17103#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17104pub fn vqdmulhs_s32(a: i32, b: i32) -> i32 {
    // No dedicated scalar binding here: broadcast both operands into 2-lane
    // vectors, run the vector SQDMULH, and take lane 0 of the result.
17105    let a: int32x2_t = vdup_n_s32(a);
17106    let b: int32x2_t = vdup_n_s32(b);
17107    unsafe { simd_extract!(vqdmulh_s32(a, b), 0) }
17108}
17109#[doc = "Signed saturating doubling multiply returning high half"]
17110#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"]
17111#[inline(always)]
17112#[target_feature(enable = "neon")]
17113#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
17114#[rustc_legacy_const_generics(2)]
17115#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17116pub fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
17117    static_assert_uimm_bits!(N, 1);
17118    unsafe {
        // Pull lane N out of `b` and fall through to the scalar form.
17119        let b: i32 = simd_extract!(b, N as u32);
17120        vqdmulhs_s32(a, b)
17121    }
17122}
17123#[doc = "Signed saturating doubling multiply returning high half"]
17124#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"]
17125#[inline(always)]
17126#[target_feature(enable = "neon")]
17127#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
17128#[rustc_legacy_const_generics(2)]
17129#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17130pub fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
17131    static_assert_uimm_bits!(N, 2);
17132    unsafe {
        // Pull lane N out of the 4-lane `b` and fall through to the scalar form.
17133        let b: i32 = simd_extract!(b, N as u32);
17134        vqdmulhs_s32(a, b)
17135    }
17136}
17137#[doc = "Signed saturating doubling multiply long"]
17138#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"]
17139#[inline(always)]
17140#[target_feature(enable = "neon")]
17141#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
17142#[rustc_legacy_const_generics(2)]
17143#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17144pub fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
17145    static_assert_uimm_bits!(N, 2);
17146    unsafe {
        // Take the upper 4 lanes of `a`, splat lane N of `b`, then delegate
        // to the 64-bit-wide vqdmull (codegen folds this into SQDMULL2).
17147        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
17148        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
17149        vqdmull_s16(a, b)
17150    }
17151}
17152#[doc = "Signed saturating doubling multiply long"]
17153#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"]
17154#[inline(always)]
17155#[target_feature(enable = "neon")]
17156#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
17157#[rustc_legacy_const_generics(2)]
17158#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17159pub fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
17160    static_assert_uimm_bits!(N, 2);
17161    unsafe {
        // Take the upper 2 lanes of `a`, splat lane N of `b`, then delegate
        // to the 64-bit-wide vqdmull (codegen folds this into SQDMULL2).
17162        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
17163        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
17164        vqdmull_s32(a, b)
17165    }
17166}
17167#[doc = "Signed saturating doubling multiply long"]
17168#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"]
17169#[inline(always)]
17170#[target_feature(enable = "neon")]
17171#[cfg_attr(test, assert_instr(sqdmull2, N = 1))]
17172#[rustc_legacy_const_generics(2)]
17173#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17174pub fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
17175    static_assert_uimm_bits!(N, 1);
17176    unsafe {
        // Take the upper 2 lanes of `a`, splat lane N of `b`, then delegate
        // to the 64-bit-wide vqdmull (codegen folds this into SQDMULL2).
17177        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
17178        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
17179        vqdmull_s32(a, b)
17180    }
17181}
17182#[doc = "Signed saturating doubling multiply long"]
17183#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"]
17184#[inline(always)]
17185#[target_feature(enable = "neon")]
17186#[cfg_attr(test, assert_instr(sqdmull2, N = 4))]
17187#[rustc_legacy_const_generics(2)]
17188#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17189pub fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
17190    static_assert_uimm_bits!(N, 3);
17191    unsafe {
        // Take the upper 4 lanes of `a`, splat lane N of the 8-lane `b`, then
        // delegate to the 64-bit-wide vqdmull (codegen folds this into SQDMULL2).
17192        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
17193        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
17194        vqdmull_s16(a, b)
17195    }
17196}
17197#[doc = "Signed saturating doubling multiply long"]
17198#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"]
17199#[inline(always)]
17200#[target_feature(enable = "neon")]
17201#[cfg_attr(test, assert_instr(sqdmull2))]
17202#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17203pub fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
17204    unsafe {
        // Take the upper 4 lanes of `a`, broadcast the scalar `b`, then
        // delegate to the 64-bit-wide vqdmull.
17205        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
17206        let b: int16x4_t = vdup_n_s16(b);
17207        vqdmull_s16(a, b)
17208    }
17209}
17210#[doc = "Signed saturating doubling multiply long"]
17211#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"]
17212#[inline(always)]
17213#[target_feature(enable = "neon")]
17214#[cfg_attr(test, assert_instr(sqdmull2))]
17215#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17216pub fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
17217    unsafe {
        // Take the upper 2 lanes of `a`, broadcast the scalar `b`, then
        // delegate to the 64-bit-wide vqdmull.
17218        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
17219        let b: int32x2_t = vdup_n_s32(b);
17220        vqdmull_s32(a, b)
17221    }
17222}
17223#[doc = "Signed saturating doubling multiply long"]
17224#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"]
17225#[inline(always)]
17226#[target_feature(enable = "neon")]
17227#[cfg_attr(test, assert_instr(sqdmull2))]
17228#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17229pub fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
17230    unsafe {
        // Take the upper 4 lanes of both inputs, then delegate to the
        // 64-bit-wide vqdmull (codegen folds this into SQDMULL2).
17231        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
17232        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
17233        vqdmull_s16(a, b)
17234    }
17235}
17236#[doc = "Signed saturating doubling multiply long"]
17237#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"]
17238#[inline(always)]
17239#[target_feature(enable = "neon")]
17240#[cfg_attr(test, assert_instr(sqdmull2))]
17241#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17242pub fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
17243    unsafe {
        // Take the upper 2 lanes of both inputs, then delegate to the
        // 64-bit-wide vqdmull (codegen folds this into SQDMULL2).
17244        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
17245        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
17246        vqdmull_s32(a, b)
17247    }
17248}
17249#[doc = "Vector saturating doubling long multiply by scalar"]
17250#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"]
17251#[inline(always)]
17252#[target_feature(enable = "neon")]
17253#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
17254#[rustc_legacy_const_generics(2)]
17255#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17256pub fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
17257    static_assert_uimm_bits!(N, 3);
17258    unsafe {
        // Splat lane N of the 8-lane `b` down to 4 lanes, then delegate.
17259        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
17260        vqdmull_s16(a, b)
17261    }
17262}
17263#[doc = "Vector saturating doubling long multiply by scalar"]
17264#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"]
17265#[inline(always)]
17266#[target_feature(enable = "neon")]
17267#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
17268#[rustc_legacy_const_generics(2)]
17269#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17270pub fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
17271    static_assert_uimm_bits!(N, 2);
17272    unsafe {
        // Splat lane N of the 4-lane `b` down to 2 lanes, then delegate.
17273        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
17274        vqdmull_s32(a, b)
17275    }
17276}
17277#[doc = "Signed saturating doubling multiply long"]
17278#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"]
17279#[inline(always)]
17280#[target_feature(enable = "neon")]
17281#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
17282#[rustc_legacy_const_generics(2)]
17283#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17284pub fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
17285    static_assert_uimm_bits!(N, 2);
17286    unsafe {
        // Pull lane N out of `b` and fall through to the scalar form.
17287        let b: i16 = simd_extract!(b, N as u32);
17288        vqdmullh_s16(a, b)
17289    }
17290}
17291#[doc = "Signed saturating doubling multiply long"]
17292#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"]
17293#[inline(always)]
17294#[target_feature(enable = "neon")]
17295#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
17296#[rustc_legacy_const_generics(2)]
17297#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17298pub fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
17299    static_assert_uimm_bits!(N, 2);
17300    unsafe {
        // Pull lane N out of the 4-lane `b` and fall through to the scalar form.
17301        let b: i32 = simd_extract!(b, N as u32);
17302        vqdmulls_s32(a, b)
17303    }
17304}
17305#[doc = "Signed saturating doubling multiply long"]
17306#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"]
17307#[inline(always)]
17308#[target_feature(enable = "neon")]
17309#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
17310#[rustc_legacy_const_generics(2)]
17311#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17312pub fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
17313    static_assert_uimm_bits!(N, 3);
17314    unsafe {
        // Pull lane N out of the 8-lane `b` and fall through to the scalar form.
17315        let b: i16 = simd_extract!(b, N as u32);
17316        vqdmullh_s16(a, b)
17317    }
17318}
17319#[doc = "Signed saturating doubling multiply long"]
17320#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"]
17321#[inline(always)]
17322#[target_feature(enable = "neon")]
17323#[cfg_attr(test, assert_instr(sqdmull))]
17324#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17325pub fn vqdmullh_s16(a: i16, b: i16) -> i32 {
    // No dedicated scalar binding for i16: broadcast both operands into
    // 4-lane vectors, run the vector SQDMULL, and take lane 0 of the result.
17326    let a: int16x4_t = vdup_n_s16(a);
17327    let b: int16x4_t = vdup_n_s16(b);
17328    unsafe { simd_extract!(vqdmull_s16(a, b), 0) }
17329}
17330#[doc = "Signed saturating doubling multiply long"]
17331#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"]
17332#[inline(always)]
17333#[target_feature(enable = "neon")]
17334#[cfg_attr(test, assert_instr(sqdmull, N = 1))]
17335#[rustc_legacy_const_generics(2)]
17336#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17337pub fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
17338    static_assert_uimm_bits!(N, 1);
17339    unsafe {
        // Pull lane N out of `b` and fall through to the scalar form.
17340        let b: i32 = simd_extract!(b, N as u32);
17341        vqdmulls_s32(a, b)
17342    }
17343}
17344#[doc = "Signed saturating doubling multiply long"]
17345#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"]
17346#[inline(always)]
17347#[target_feature(enable = "neon")]
17348#[cfg_attr(test, assert_instr(sqdmull))]
17349#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17350pub fn vqdmulls_s32(a: i32, b: i32) -> i64 {
    // Binding to the scalar LLVM intrinsic that backs this instruction.
17351    unsafe extern "unadjusted" {
17352        #[cfg_attr(
17353            any(target_arch = "aarch64", target_arch = "arm64ec"),
17354            link_name = "llvm.aarch64.neon.sqdmulls.scalar"
17355        )]
17356        fn _vqdmulls_s32(a: i32, b: i32) -> i64;
17357    }
    // SAFETY: signature matches the LLVM intrinsic; `neon` availability is
    // guaranteed by the #[target_feature] attribute on this function.
17358    unsafe { _vqdmulls_s32(a, b) }
17359}
17360#[doc = "Signed saturating extract narrow"]
17361#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"]
17362#[inline(always)]
17363#[target_feature(enable = "neon")]
17364#[cfg_attr(test, assert_instr(sqxtn2))]
17365#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17366pub fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
17367    unsafe {
        // Saturating-narrow `b` to 8 lanes, then concatenate `a` (low half)
        // with the narrowed result (high half) into one 128-bit vector.
17368        simd_shuffle!(
17369            a,
17370            vqmovn_s16(b),
17371            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
17372        )
17373    }
17374}
17375#[doc = "Signed saturating extract narrow"]
17376#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"]
17377#[inline(always)]
17378#[target_feature(enable = "neon")]
17379#[cfg_attr(test, assert_instr(sqxtn2))]
17380#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17381pub fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Concatenate `a` (low half) with the saturating-narrowed `b` (high half).
17382    unsafe { simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
17383}
17384#[doc = "Signed saturating extract narrow"]
17385#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"]
17386#[inline(always)]
17387#[target_feature(enable = "neon")]
17388#[cfg_attr(test, assert_instr(sqxtn2))]
17389#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17390pub fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Concatenate `a` (low half) with the saturating-narrowed `b` (high half).
17391    unsafe { simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) }
17392}
17393#[doc = "Unsigned saturating extract narrow"]
17394#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"]
17395#[inline(always)]
17396#[target_feature(enable = "neon")]
17397#[cfg_attr(test, assert_instr(uqxtn2))]
17398#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17399pub fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
17400    unsafe {
        // Saturating-narrow `b` to 8 lanes, then concatenate `a` (low half)
        // with the narrowed result (high half) into one 128-bit vector.
17401        simd_shuffle!(
17402            a,
17403            vqmovn_u16(b),
17404            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
17405        )
17406    }
17407}
17408#[doc = "Unsigned saturating extract narrow"]
17409#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"]
17410#[inline(always)]
17411#[target_feature(enable = "neon")]
17412#[cfg_attr(test, assert_instr(uqxtn2))]
17413#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17414pub fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Concatenate `a` (low half) with the saturating-narrowed `b` (high half).
17415    unsafe { simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
17416}
17417#[doc = "Unsigned saturating extract narrow"]
17418#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"]
17419#[inline(always)]
17420#[target_feature(enable = "neon")]
17421#[cfg_attr(test, assert_instr(uqxtn2))]
17422#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17423pub fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Concatenate `a` (low half) with the saturating-narrowed `b` (high half).
17424    unsafe { simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) }
17425}
17426#[doc = "Saturating extract narrow"]
17427#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"]
17428#[inline(always)]
17429#[target_feature(enable = "neon")]
17430#[cfg_attr(test, assert_instr(sqxtn))]
17431#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17432pub fn vqmovnd_s64(a: i64) -> i32 {
    // Binding to the scalar LLVM intrinsic that backs this instruction.
17433    unsafe extern "unadjusted" {
17434        #[cfg_attr(
17435            any(target_arch = "aarch64", target_arch = "arm64ec"),
17436            link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64"
17437        )]
17438        fn _vqmovnd_s64(a: i64) -> i32;
17439    }
    // SAFETY: signature matches the LLVM intrinsic; `neon` availability is
    // guaranteed by the #[target_feature] attribute on this function.
17440    unsafe { _vqmovnd_s64(a) }
17441}
17442#[doc = "Saturating extract narrow"]
17443#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"]
17444#[inline(always)]
17445#[target_feature(enable = "neon")]
17446#[cfg_attr(test, assert_instr(uqxtn))]
17447#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17448pub fn vqmovnd_u64(a: u64) -> u32 {
    // Binding to the scalar LLVM intrinsic that backs this instruction.
17449    unsafe extern "unadjusted" {
17450        #[cfg_attr(
17451            any(target_arch = "aarch64", target_arch = "arm64ec"),
17452            link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64"
17453        )]
17454        fn _vqmovnd_u64(a: u64) -> u32;
17455    }
    // SAFETY: signature matches the LLVM intrinsic; `neon` availability is
    // guaranteed by the #[target_feature] attribute on this function.
17456    unsafe { _vqmovnd_u64(a) }
17457}
17458#[doc = "Saturating extract narrow"]
17459#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"]
17460#[inline(always)]
17461#[target_feature(enable = "neon")]
17462#[cfg_attr(test, assert_instr(sqxtn))]
17463#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17464pub fn vqmovnh_s16(a: i16) -> i8 {
    // Broadcast the scalar, run the vector narrow, take lane 0.
17465    unsafe { simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) }
17466}
17467#[doc = "Saturating extract narrow"]
17468#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"]
17469#[inline(always)]
17470#[target_feature(enable = "neon")]
17471#[cfg_attr(test, assert_instr(sqxtn))]
17472#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17473pub fn vqmovns_s32(a: i32) -> i16 {
    // Broadcast the scalar, run the vector narrow, take lane 0.
17474    unsafe { simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) }
17475}
17476#[doc = "Saturating extract narrow"]
17477#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"]
17478#[inline(always)]
17479#[target_feature(enable = "neon")]
17480#[cfg_attr(test, assert_instr(uqxtn))]
17481#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17482pub fn vqmovnh_u16(a: u16) -> u8 {
    // Broadcast the scalar, run the vector narrow, take lane 0.
17483    unsafe { simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) }
17484}
17485#[doc = "Saturating extract narrow"]
17486#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"]
17487#[inline(always)]
17488#[target_feature(enable = "neon")]
17489#[cfg_attr(test, assert_instr(uqxtn))]
17490#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17491pub fn vqmovns_u32(a: u32) -> u16 {
    // Broadcast the scalar, run the vector narrow, take lane 0.
17492    unsafe { simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) }
17493}
17494#[doc = "Signed saturating extract unsigned narrow"]
17495#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"]
17496#[inline(always)]
17497#[target_feature(enable = "neon")]
17498#[cfg_attr(test, assert_instr(sqxtun2))]
17499#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17500pub fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
17501    unsafe {
        // Unsigned-saturating-narrow the signed `b`, then concatenate `a`
        // (low half) with the narrowed result (high half).
17502        simd_shuffle!(
17503            a,
17504            vqmovun_s16(b),
17505            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
17506        )
17507    }
17508}
17509#[doc = "Signed saturating extract unsigned narrow"]
17510#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"]
17511#[inline(always)]
17512#[target_feature(enable = "neon")]
17513#[cfg_attr(test, assert_instr(sqxtun2))]
17514#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17515pub fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Concatenate `a` (low half) with the unsigned-saturating-narrowed `b`.
17516    unsafe { simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
17517}
17518#[doc = "Signed saturating extract unsigned narrow"]
17519#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"]
17520#[inline(always)]
17521#[target_feature(enable = "neon")]
17522#[cfg_attr(test, assert_instr(sqxtun2))]
17523#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17524pub fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Concatenate `a` (low half) with the unsigned-saturating-narrowed `b`.
17525    unsafe { simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) }
17526}
17527#[doc = "Signed saturating extract unsigned narrow"]
17528#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"]
17529#[inline(always)]
17530#[target_feature(enable = "neon")]
17531#[cfg_attr(test, assert_instr(sqxtun))]
17532#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17533pub fn vqmovunh_s16(a: i16) -> u8 {
    // Broadcast the scalar, run the vector unsigned-narrow, take lane 0.
17534    unsafe { simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) }
17535}
17536#[doc = "Signed saturating extract unsigned narrow"]
17537#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"]
17538#[inline(always)]
17539#[target_feature(enable = "neon")]
17540#[cfg_attr(test, assert_instr(sqxtun))]
17541#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17542pub fn vqmovuns_s32(a: i32) -> u16 {
    // Broadcast the scalar, run the vector unsigned-narrow, take lane 0.
17543    unsafe { simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) }
17544}
17545#[doc = "Signed saturating extract unsigned narrow"]
17546#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"]
17547#[inline(always)]
17548#[target_feature(enable = "neon")]
17549#[cfg_attr(test, assert_instr(sqxtun))]
17550#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17551pub fn vqmovund_s64(a: i64) -> u32 {
    // Broadcast the scalar, run the vector unsigned-narrow, take lane 0.
17552    unsafe { simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) }
17553}
17554#[doc = "Signed saturating negate"]
17555#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"]
17556#[inline(always)]
17557#[target_feature(enable = "neon")]
17558#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17559#[cfg_attr(test, assert_instr(sqneg))]
17560pub fn vqneg_s64(a: int64x1_t) -> int64x1_t {
    // Binding to the LLVM intrinsic that backs this instruction.
17561    unsafe extern "unadjusted" {
17562        #[cfg_attr(
17563            any(target_arch = "aarch64", target_arch = "arm64ec"),
17564            link_name = "llvm.aarch64.neon.sqneg.v1i64"
17565        )]
17566        fn _vqneg_s64(a: int64x1_t) -> int64x1_t;
17567    }
    // SAFETY: signature matches the LLVM intrinsic; `neon` availability is
    // guaranteed by the #[target_feature] attribute on this function.
17568    unsafe { _vqneg_s64(a) }
17569}
17570#[doc = "Signed saturating negate"]
17571#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"]
17572#[inline(always)]
17573#[target_feature(enable = "neon")]
17574#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17575#[cfg_attr(test, assert_instr(sqneg))]
17576pub fn vqnegq_s64(a: int64x2_t) -> int64x2_t {
    // Binding to the LLVM intrinsic that backs this instruction.
17577    unsafe extern "unadjusted" {
17578        #[cfg_attr(
17579            any(target_arch = "aarch64", target_arch = "arm64ec"),
17580            link_name = "llvm.aarch64.neon.sqneg.v2i64"
17581        )]
17582        fn _vqnegq_s64(a: int64x2_t) -> int64x2_t;
17583    }
    // SAFETY: signature matches the LLVM intrinsic; `neon` availability is
    // guaranteed by the #[target_feature] attribute on this function.
17584    unsafe { _vqnegq_s64(a) }
17585}
17586#[doc = "Signed saturating negate"]
17587#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"]
17588#[inline(always)]
17589#[target_feature(enable = "neon")]
17590#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17591#[cfg_attr(test, assert_instr(sqneg))]
17592pub fn vqnegb_s8(a: i8) -> i8 {
    // Broadcast the scalar, run the vector saturating negate, take lane 0.
17593    unsafe { simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) }
17594}
17595#[doc = "Signed saturating negate"]
17596#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"]
17597#[inline(always)]
17598#[target_feature(enable = "neon")]
17599#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17600#[cfg_attr(test, assert_instr(sqneg))]
17601pub fn vqnegh_s16(a: i16) -> i16 {
    // Broadcast the scalar, run the vector saturating negate, take lane 0.
17602    unsafe { simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) }
17603}
17604#[doc = "Signed saturating negate"]
17605#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"]
17606#[inline(always)]
17607#[target_feature(enable = "neon")]
17608#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17609#[cfg_attr(test, assert_instr(sqneg))]
17610pub fn vqnegs_s32(a: i32) -> i32 {
    // Broadcast the scalar, run the vector saturating negate, take lane 0.
17611    unsafe { simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) }
17612}
17613#[doc = "Signed saturating negate"]
17614#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"]
17615#[inline(always)]
17616#[target_feature(enable = "neon")]
17617#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17618#[cfg_attr(test, assert_instr(sqneg))]
17619pub fn vqnegd_s64(a: i64) -> i64 {
    // Broadcast the scalar, run the vector saturating negate, take lane 0.
17620    unsafe { simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) }
17621}
17622#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17623#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"]
17624#[inline(always)]
17625#[target_feature(enable = "rdm")]
17626#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17627#[rustc_legacy_const_generics(3)]
17628#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17629pub fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
17630    static_assert_uimm_bits!(LANE, 2);
17631    unsafe {
        // Splat lane LANE of `c` across all lanes, then reuse the vector form.
17632        let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
17633        vqrdmlah_s16(a, b, c)
17634    }
17635}
17636#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17637#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"]
17638#[inline(always)]
17639#[target_feature(enable = "rdm")]
17640#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17641#[rustc_legacy_const_generics(3)]
17642#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17643pub fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
17644    static_assert_uimm_bits!(LANE, 1);
17645    unsafe {
        // Splat lane LANE of `c` across both lanes, then reuse the vector form.
17646        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32; 2]);
17647        vqrdmlah_s32(a, b, c)
17648    }
17649}
17650#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17651#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"]
17652#[inline(always)]
17653#[target_feature(enable = "rdm")]
17654#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17655#[rustc_legacy_const_generics(3)]
17656#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17657pub fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
17658    static_assert_uimm_bits!(LANE, 3);
17659    unsafe {
        // Splat lane LANE of the 8-lane `c` down to 4 lanes, then reuse the vector form.
17660        let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
17661        vqrdmlah_s16(a, b, c)
17662    }
17663}
17664#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17665#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"]
17666#[inline(always)]
17667#[target_feature(enable = "rdm")]
17668#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17669#[rustc_legacy_const_generics(3)]
17670#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17671pub fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
17672    static_assert_uimm_bits!(LANE, 2);
17673    unsafe {
        // Splat lane LANE of the 4-lane `c` down to 2 lanes, then reuse the vector form.
17674        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32; 2]);
17675        vqrdmlah_s32(a, b, c)
17676    }
17677}
17678#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17679#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"]
17680#[inline(always)]
17681#[target_feature(enable = "rdm")]
17682#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17683#[rustc_legacy_const_generics(3)]
17684#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17685pub fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
17686    static_assert_uimm_bits!(LANE, 2);
17687    unsafe {
        // Splat lane LANE of the 4-lane `c` up to 8 lanes, then reuse the vector form.
17688        let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32; 8]);
17689        vqrdmlahq_s16(a, b, c)
17690    }
17691}
17692#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17693#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"]
17694#[inline(always)]
17695#[target_feature(enable = "rdm")]
17696#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17697#[rustc_legacy_const_generics(3)]
17698#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17699pub fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
17700    static_assert_uimm_bits!(LANE, 1);
17701    unsafe {
        // Splat lane LANE of the 2-lane `c` up to 4 lanes, then reuse the vector form.
17702        let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
17703        vqrdmlahq_s32(a, b, c)
17704    }
17705}
17706#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17707#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"]
17708#[inline(always)]
17709#[target_feature(enable = "rdm")]
17710#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17711#[rustc_legacy_const_generics(3)]
17712#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17713pub fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
17714    static_assert_uimm_bits!(LANE, 3);
17715    unsafe {
        // Splat lane LANE of `c` across all 8 lanes, then reuse the vector form.
17716        let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32; 8]);
17717        vqrdmlahq_s16(a, b, c)
17718    }
17719}
17720#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
17721#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"]
17722#[inline(always)]
17723#[target_feature(enable = "rdm")]
17724#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
17725#[rustc_legacy_const_generics(3)]
17726#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
17727pub fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
17728    static_assert_uimm_bits!(LANE, 2);
17729    unsafe {
        // Splat lane LANE of `c` across all 4 lanes, then reuse the vector form.
17730        let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
17731        vqrdmlahq_s32(a, b, c)
17732    }
17733}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Thin wrapper: binds and calls the LLVM intrinsic for the 4 x i16
    // vector form (`v4i16` suffix in the link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i16"
        )]
        fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    unsafe { _vqrdmlah_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Thin wrapper: binds and calls the LLVM intrinsic for the 8 x i16
    // vector form (`v8i16` suffix in the link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v8i16"
        )]
        fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vqrdmlahq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Thin wrapper: binds and calls the LLVM intrinsic for the 2 x i32
    // vector form (`v2i32` suffix in the link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v2i32"
        )]
        fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    unsafe { _vqrdmlah_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Thin wrapper: binds and calls the LLVM intrinsic for the 4 x i32
    // vector form (`v4i32` suffix in the link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i32"
        )]
        fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _vqrdmlahq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane of `c` and defer to the scalar form.
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // `c` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Extract the selected lane of `c` and defer to the scalar form.
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane of `c` and defer to the scalar form.
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane of `c` and defer to the scalar form.
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Scalar form implemented via the vector intrinsic: splat each scalar
    // into a 4-lane vector, run the vector op, and take lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    unsafe { simd_extract!(vqrdmlah_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Scalar form implemented via the vector intrinsic: splat each scalar
    // into a 2-lane vector, run the vector op, and take lane 0.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    unsafe { simd_extract!(vqrdmlah_s32(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected lane of `c` across all 4 lanes, then
        // defer to the full-vector form of the intrinsic.
        let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected lane of `c` across both lanes, then
        // defer to the full-vector form of the intrinsic.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32; 2]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // `c` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast the selected lane of the 8-lane `c` into a 4-lane
        // vector, then defer to the 64-bit vector form of the intrinsic.
        let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected lane of the 4-lane `c` into a 2-lane
        // vector, then defer to the 64-bit vector form of the intrinsic.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32; 2]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected lane of the 4-lane `c` across 8 lanes,
        // then defer to the 128-bit vector form of the intrinsic.
        let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32; 8]);
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected lane of the 2-lane `c` across 4 lanes,
        // then defer to the 128-bit vector form of the intrinsic.
        let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // `c` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast the selected lane of `c` across all 8 lanes, then
        // defer to the full-vector form of the intrinsic.
        let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32; 8]);
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected lane of `c` across all 4 lanes, then
        // defer to the full-vector form of the intrinsic.
        let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32; 4]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Thin wrapper: binds and calls the LLVM intrinsic for the 4 x i16
    // vector form (`v4i16` suffix in the link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16"
        )]
        fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    unsafe { _vqrdmlsh_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Thin wrapper: binds and calls the LLVM intrinsic for the 8 x i16
    // vector form (`v8i16` suffix in the link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16"
        )]
        fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vqrdmlshq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Thin wrapper: binds and calls the LLVM intrinsic for the 2 x i32
    // vector form (`v2i32` suffix in the link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32"
        )]
        fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    unsafe { _vqrdmlsh_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Thin wrapper: binds and calls the LLVM intrinsic for the 4 x i32
    // vector form (`v4i32` suffix in the link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32"
        )]
        fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _vqrdmlshq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane of `c` and defer to the scalar form.
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // `c` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Extract the selected lane of `c` and defer to the scalar form.
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // `c` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane of `c` and defer to the scalar form.
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // `c` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane of `c` and defer to the scalar form.
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Scalar form implemented via the vector intrinsic: splat each scalar
    // into a 4-lane vector, run the vector op, and take lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    unsafe { simd_extract!(vqrdmlsh_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Scalar form implemented via the vector intrinsic: splat each scalar
    // into a 2-lane vector, run the vector op, and take lane 0.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    unsafe { simd_extract!(vqrdmlsh_s32(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane of `b` and defer to the scalar form.
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16 {
    // `b` has 8 lanes, so LANE must fit in 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Extract the selected lane of `b` and defer to the scalar form.
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
    // `b` has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane of `b` and defer to the scalar form.
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_laneq_s32<const LANE: i32>(a: i32, b: int32x4_t) -> i32 {
    // `b` has 4 lanes, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane of `b` and defer to the scalar form.
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_s16(a: i16, b: i16) -> i16 {
    // Scalar form via the vector intrinsic: splat both scalars, run the
    // vector op, and take lane 0.
    unsafe { simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_s32(a: i32, b: i32) -> i32 {
    // Scalar form via the vector intrinsic: splat both scalars, run the
    // vector op, and take lane 0.
    unsafe { simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_s8(a: i8, b: i8) -> i8 {
    // Scalar form via the vector intrinsic: splat both scalars into
    // 8-lane vectors, run the vector op, and take lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_s8(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_s16(a: i16, b: i16) -> i16 {
    // Scalar form via the vector intrinsic: splat both scalars into
    // 4-lane vectors, run the vector op, and take lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_s16(a, b), 0) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_u8(a: u8, b: i8) -> u8 {
    // Scalar form via the vector intrinsic; note the shift amount `b` is
    // signed (negative values shift right) while the value `a` is unsigned.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_u8(a, b), 0) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_u16(a: u16, b: i16) -> u16 {
    // Scalar form via the vector intrinsic; note the shift amount `b` is
    // signed (negative values shift right) while the value `a` is unsigned.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_u16(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_s64(a: i64, b: i64) -> i64 {
    // Thin wrapper: binds and calls the scalar (i64) form of the LLVM
    // intrinsic directly — no splat/extract needed for 64-bit scalars.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i64"
        )]
        fn _vqrshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqrshld_s64(a, b) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_s32(a: i32, b: i32) -> i32 {
    // Thin wrapper: binds and calls the scalar (i32) form of the LLVM
    // intrinsic directly.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i32"
        )]
        fn _vqrshls_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqrshls_s32(a, b) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_u32(a: u32, b: i32) -> u32 {
    // Thin wrapper: binds and calls the scalar (i32) unsigned form of the
    // LLVM intrinsic; the shift amount `b` stays signed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i32"
        )]
        fn _vqrshls_u32(a: u32, b: i32) -> u32;
    }
    unsafe { _vqrshls_u32(a, b) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_u64(a: u64, b: i64) -> u64 {
    // Thin wrapper: binds and calls the scalar (i64) unsigned form of the
    // LLVM intrinsic; the shift amount `b` stays signed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i64"
        )]
        fn _vqrshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqrshld_u64(a, b) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Shift amount is bounded by the source element width (16 bits → 1..=8).
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` to 8 x i8, then concatenate: `a` fills the low half
        // of the result and the narrowed `b` fills the high half.
        simd_shuffle!(
            a,
            vqrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Shift amount is bounded by the source element width (32 bits → 1..=16).
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b` to 4 x i16, then concatenate `a` (low half) with it (high half).
    unsafe { simd_shuffle!(a, vqrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Shift amount is bounded by the source element width (64 bits → 1..=32).
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b` to 2 x i32, then concatenate `a` (low half) with it (high half).
    unsafe { simd_shuffle!(a, vqrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Shift amount is bounded by the source element width (16 bits → 1..=8).
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` to 8 x u8, then concatenate: `a` fills the low half
        // of the result and the narrowed `b` fills the high half.
        simd_shuffle!(
            a,
            vqrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
18336#[doc = "Unsigned saturating rounded shift right narrow"]
18337#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"]
18338#[inline(always)]
18339#[target_feature(enable = "neon")]
18340#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
18341#[rustc_legacy_const_generics(2)]
18342#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18343pub fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
18344    static_assert!(N >= 1 && N <= 16);
18345    unsafe { simd_shuffle!(a, vqrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
18346}
18347#[doc = "Unsigned saturating rounded shift right narrow"]
18348#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"]
18349#[inline(always)]
18350#[target_feature(enable = "neon")]
18351#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
18352#[rustc_legacy_const_generics(2)]
18353#[stable(feature = "neon_intrinsics", since = "1.59.0")]
18354pub fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
18355    static_assert!(N >= 1 && N <= 32);
18356    unsafe { simd_shuffle!(a, vqrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
18357}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    // 64 -> 32 bit narrowing: the immediate shift amount must be in 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Scalar form: splat the scalar into a vector, run the vector narrowing
    // shift, and extract lane 0 of the result.
    let a: uint64x2_t = vdupq_n_u64(a);
    unsafe { simd_extract!(vqrshrn_n_u64::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    // 16 -> 8 bit narrowing: the immediate shift amount must be in 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Splat, vector-narrow, extract lane 0.
    let a: uint16x8_t = vdupq_n_u16(a);
    unsafe { simd_extract!(vqrshrn_n_u16::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
    // 32 -> 16 bit narrowing: the immediate shift amount must be in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Splat, vector-narrow, extract lane 0.
    let a: uint32x4_t = vdupq_n_u32(a);
    unsafe { simd_extract!(vqrshrn_n_u32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    // 16 -> 8 bit narrowing: the immediate shift amount must be in 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Splat, vector-narrow, extract lane 0.
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrn_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
    // 32 -> 16 bit narrowing: the immediate shift amount must be in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Splat, vector-narrow, extract lane 0.
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrn_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    // 64 -> 32 bit narrowing: the immediate shift amount must be in 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Splat, vector-narrow, extract lane 0.
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrn_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // 16 -> 8 bit narrowing: the immediate shift amount must be in 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` (signed input, unsigned saturated output), then
        // concatenate after `a`: indices 0..=7 select the lanes of `a`,
        // 8..=15 the lanes of the narrowed `b`.
        simd_shuffle!(
            a,
            vqrshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // 32 -> 16 bit narrowing: the immediate shift amount must be in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Indices 0..=3 select the lanes of `a`, 4..=7 the lanes of the narrowed `b`.
    unsafe { simd_shuffle!(a, vqrshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // 64 -> 32 bit narrowing: the immediate shift amount must be in 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Indices 0..=1 select the lanes of `a`, 2..=3 the lanes of the narrowed `b`.
    unsafe { simd_shuffle!(a, vqrshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
    // 64 -> 32 bit narrowing: the immediate shift amount must be in 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Scalar form: splat the scalar into a vector, run the vector narrowing
    // shift, and extract lane 0 of the result.
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrun_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    // 16 -> 8 bit narrowing: the immediate shift amount must be in 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Splat, vector-narrow, extract lane 0.
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrun_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
    // 32 -> 16 bit narrowing: the immediate shift amount must be in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Splat, vector-narrow, extract lane 0.
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrun_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
    // Shift amount is a 3-bit unsigned immediate (0..=7) for 8-bit elements.
    static_assert_uimm_bits!(N, 3);
    // Scalar form: splat, run the vector saturating shift, extract lane 0.
    unsafe { simd_extract!(vqshl_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
    // Shift amount is a 6-bit unsigned immediate (0..=63) for 64-bit elements.
    static_assert_uimm_bits!(N, 6);
    // Splat, vector saturating shift, extract lane 0.
    unsafe { simd_extract!(vqshl_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
    // Shift amount is a 4-bit unsigned immediate (0..=15) for 16-bit elements.
    static_assert_uimm_bits!(N, 4);
    // Splat, vector saturating shift, extract lane 0.
    unsafe { simd_extract!(vqshl_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
    // Shift amount is a 5-bit unsigned immediate (0..=31) for 32-bit elements.
    static_assert_uimm_bits!(N, 5);
    // Splat, vector saturating shift, extract lane 0.
    unsafe { simd_extract!(vqshl_n_s32::<N>(vdup_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
    // Shift amount is a 3-bit unsigned immediate (0..=7) for 8-bit elements.
    static_assert_uimm_bits!(N, 3);
    // Scalar form: splat, run the vector saturating shift, extract lane 0.
    unsafe { simd_extract!(vqshl_n_u8::<N>(vdup_n_u8(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
    // Shift amount is a 6-bit unsigned immediate (0..=63) for 64-bit elements.
    static_assert_uimm_bits!(N, 6);
    // Splat, vector saturating shift, extract lane 0.
    unsafe { simd_extract!(vqshl_n_u64::<N>(vdup_n_u64(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
    // Shift amount is a 4-bit unsigned immediate (0..=15) for 16-bit elements.
    static_assert_uimm_bits!(N, 4);
    // Splat, vector saturating shift, extract lane 0.
    unsafe { simd_extract!(vqshl_n_u16::<N>(vdup_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
    // Shift amount is a 5-bit unsigned immediate (0..=31) for 32-bit elements.
    static_assert_uimm_bits!(N, 5);
    // Splat, vector saturating shift, extract lane 0.
    unsafe { simd_extract!(vqshl_n_u32::<N>(vdup_n_u32(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_s8(a: i8, b: i8) -> i8 {
    // Variable (register) shift: splat both scalars, run the vector
    // saturating shift, then extract lane 0 of the result.
    let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_s16(a: i16, b: i16) -> i16 {
    // Splat both scalars, vector saturating shift, extract lane 0.
    let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_s32(a: i32, b: i32) -> i32 {
    // Splat both scalars, vector saturating shift, extract lane 0.
    let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_u8(a: u8, b: i8) -> u8 {
    // Variable (register) shift; the shift operand `b` is signed even for the
    // unsigned value being shifted. Splat both, shift, extract lane 0.
    let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_u16(a: u16, b: i16) -> u16 {
    // Splat both scalars, vector saturating shift, extract lane 0.
    let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_u32(a: u32, b: i32) -> u32 {
    // Splat both scalars, vector saturating shift, extract lane 0.
    let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_s64(a: i64, b: i64) -> i64 {
    // 64-bit scalar case: LLVM exposes a scalar (i64) form of the intrinsic,
    // so call it directly instead of going through splat/extract.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshl.i64"
        )]
        fn _vqshld_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqshld_s64(a, b) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_u64(a: u64, b: i64) -> u64 {
    // Direct call to the scalar LLVM intrinsic; the shift operand is signed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshl.i64"
        )]
        fn _vqshld_u64(a: u64, b: i64) -> u64;
    }
    unsafe { _vqshld_u64(a, b) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
    // Shift amount is a 3-bit unsigned immediate (0..=7) for 8-bit elements.
    static_assert_uimm_bits!(N, 3);
    // Scalar form: splat, run the vector shift (signed input, unsigned
    // saturated output), extract lane 0.
    unsafe { simd_extract!(vqshlu_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
    // Shift amount is a 6-bit unsigned immediate (0..=63) for 64-bit elements.
    static_assert_uimm_bits!(N, 6);
    // Splat, vector shift, extract lane 0.
    unsafe { simd_extract!(vqshlu_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
    // Shift amount is a 4-bit unsigned immediate (0..=15) for 16-bit elements.
    static_assert_uimm_bits!(N, 4);
    // Splat, vector shift, extract lane 0.
    unsafe { simd_extract!(vqshlu_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
    // Shift amount is a 5-bit unsigned immediate (0..=31) for 32-bit elements.
    static_assert_uimm_bits!(N, 5);
    // Splat, vector shift, extract lane 0.
    unsafe { simd_extract!(vqshlu_n_s32::<N>(vdup_n_s32(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // 16 -> 8 bit narrowing: the immediate shift amount must be in 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b`, then concatenate after `a`: indices 0..=7 select the
        // lanes of `a`, 8..=15 the lanes of the narrowed `b`.
        simd_shuffle!(
            a,
            vqshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // 32 -> 16 bit narrowing: the immediate shift amount must be in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Indices 0..=3 select the lanes of `a`, 4..=7 the lanes of the narrowed `b`.
    unsafe { simd_shuffle!(a, vqshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // 64 -> 32 bit narrowing: the immediate shift amount must be in 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Indices 0..=1 select the lanes of `a`, 2..=3 the lanes of the narrowed `b`.
    unsafe { simd_shuffle!(a, vqshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // 16 -> 8 bit narrowing: the immediate shift amount must be in 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Indices 0..=7 select the lanes of `a`, 8..=15 the lanes of the
        // narrowed `b`.
        simd_shuffle!(
            a,
            vqshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // 32 -> 16 bit narrowing: the immediate shift amount must be in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Indices 0..=3 select the lanes of `a`, 4..=7 the lanes of the narrowed `b`.
    unsafe { simd_shuffle!(a, vqshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // 64 -> 32 bit narrowing: the immediate shift amount must be in 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Indices 0..=1 select the lanes of `a`, 2..=3 the lanes of the narrowed `b`.
    unsafe { simd_shuffle!(a, vqshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    // 64 -> 32 bit narrowing: the immediate shift amount must be in 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // LLVM exposes a scalar form of this intrinsic; the const generic N is
    // forwarded as the (compile-time constant) shift operand.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshrn.i32"
        )]
        fn _vqshrnd_n_s64(a: i64, n: i32) -> i32;
    }
    unsafe { _vqshrnd_n_s64(a, N) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    // 64 -> 32 bit narrowing: the immediate shift amount must be in 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Direct call to the scalar LLVM intrinsic with N as the shift operand.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshrn.i32"
        )]
        fn _vqshrnd_n_u64(a: u64, n: i32) -> u32;
    }
    unsafe { _vqshrnd_n_u64(a, N) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    // 16 -> 8 bit narrowing: the immediate shift amount must be in 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Scalar form: splat, run the vector narrowing shift, extract lane 0.
    unsafe { simd_extract!(vqshrn_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
    // 32 -> 16 bit narrowing: the immediate shift amount must be in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Splat, vector-narrow, extract lane 0.
    unsafe { simd_extract!(vqshrn_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    // 16 -> 8 bit narrowing: the immediate shift amount must be in 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Splat, vector-narrow, extract lane 0.
    unsafe { simd_extract!(vqshrn_n_u16::<N>(vdupq_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
    // 32 -> 16 bit narrowing: the immediate shift amount must be in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Splat, vector-narrow, extract lane 0.
    unsafe { simd_extract!(vqshrn_n_u32::<N>(vdupq_n_u32(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // 16 -> 8 bit narrowing: the immediate shift amount must be in 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` (signed input, unsigned saturated output), then
        // concatenate after `a`: indices 0..=7 select the lanes of `a`,
        // 8..=15 the lanes of the narrowed `b`.
        simd_shuffle!(
            a,
            vqshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // 32 -> 16 bit narrowing: the immediate shift amount must be in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Indices 0..=3 select the lanes of `a`, 4..=7 the lanes of the narrowed `b`.
    unsafe { simd_shuffle!(a, vqshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // 64 -> 32 bit narrowing: the immediate shift amount must be in 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Indices 0..=1 select the lanes of `a`, 2..=3 the lanes of the narrowed `b`.
    unsafe { simd_shuffle!(a, vqshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
    // 64 -> 32 bit narrowing: the immediate shift amount must be in 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // Scalar form: splat, run the vector narrowing shift, extract lane 0.
    unsafe { simd_extract!(vqshrun_n_s64::<N>(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    // 16 -> 8 bit narrowing: the immediate shift amount must be in 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // Splat, vector-narrow, extract lane 0.
    unsafe { simd_extract!(vqshrun_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
    // 32 -> 16 bit narrowing: the immediate shift amount must be in 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Splat, vector-narrow, extract lane 0.
    unsafe { simd_extract!(vqshrun_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubb_s8(a: i8, b: i8) -> i8 {
    // Scalar form: splat both operands, perform the vector saturating
    // subtraction, then extract lane 0 of the result.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqsub_s8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubh_s16(a: i16, b: i16) -> i16 {
    // Splat both operands, vector saturating subtract, extract lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqsub_s16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubb_u8(a: u8, b: u8) -> u8 {
    // Splat both operands, vector saturating subtract, extract lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    unsafe { simd_extract!(vqsub_u8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubh_u16(a: u16, b: u16) -> u16 {
    // Splat both operands, vector saturating subtract, extract lane 0.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    unsafe { simd_extract!(vqsub_u16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubs_s32(a: i32, b: i32) -> i32 {
    // Unlike the 8/16-bit scalar forms, LLVM exposes a scalar i32 saturating
    // subtract directly, so this binds the intrinsic instead of widening.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i32"
        )]
        fn _vqsubs_s32(a: i32, b: i32) -> i32;
    }
    unsafe { _vqsubs_s32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubd_s64(a: i64, b: i64) -> i64 {
    // Direct binding to the scalar i64 LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i64"
        )]
        fn _vqsubd_s64(a: i64, b: i64) -> i64;
    }
    unsafe { _vqsubd_s64(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubs_u32(a: u32, b: u32) -> u32 {
    // Unsigned scalar variant (llvm.aarch64.neon.uqsub.i32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i32"
        )]
        fn _vqsubs_u32(a: u32, b: u32) -> u32;
    }
    unsafe { _vqsubs_u32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubd_u64(a: u64, b: u64) -> u64 {
    // Unsigned scalar variant (llvm.aarch64.neon.uqsub.i64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i64"
        )]
        fn _vqsubd_u64(a: u64, b: u64) -> u64;
    }
    unsafe { _vqsubd_u64(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private binding to the one-table TBL LLVM intrinsic (64-bit index/result
// vector). The public _s8/_u8/_p8 wrappers below funnel through this.
fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v8i8"
        )]
        fn _vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl1(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 128-bit index/result counterpart of vqtbl1 (tbl1.v16i8).
fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v16i8"
        )]
        fn _vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl1q(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    // Signed element type matches the private helper directly.
    vqtbl1(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    vqtbl1q(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t {
    // TBL is a byte shuffle, so reinterpreting unsigned bytes as signed (and
    // back for the result) does not change the operation.
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t {
    // Polynomial bytes are likewise reinterpreted for the byte shuffle.
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private binding to the two-table TBL LLVM intrinsic; the public wrappers
// below unpack the x2 tuple types into the two table registers.
fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v8i8"
        )]
        fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl2(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 128-bit index/result counterpart (tbl2.v16i8).
fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v16i8"
        )]
        fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl2q(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t {
    // Unpack the two-register table tuple into separate arguments.
    vqtbl2(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t {
    vqtbl2q(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    // Byte shuffle is sign-agnostic; transmute element types around the call.
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private binding to the three-table TBL LLVM intrinsic.
fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v8i8"
        )]
        fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl3(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 128-bit index/result counterpart (tbl3.v16i8).
fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v16i8"
        )]
        fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl3q(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t {
    // Unpack the three-register table tuple.
    vqtbl3(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t {
    vqtbl3q(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    // Sign-agnostic byte shuffle; transmute element types around the call.
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private binding to the four-table TBL LLVM intrinsic.
fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v8i8"
        )]
        fn _vqtbl4(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x8_t,
        ) -> int8x8_t;
    }
    unsafe { _vqtbl4(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 128-bit index/result counterpart (tbl4.v16i8).
fn vqtbl4q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v16i8"
        )]
        fn _vqtbl4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    unsafe { _vqtbl4q(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t {
    // Unpack the four-register table tuple.
    vqtbl4(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t {
    vqtbl4q(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    // Sign-agnostic byte shuffle; transmute element types around the call.
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private binding to the one-table TBX LLVM intrinsic; unlike TBL it also
// takes the fallback vector `a` as its first argument.
fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v8i8"
        )]
        fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbx1(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 128-bit index/result counterpart (tbx1.v16i8).
fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v16i8"
        )]
        fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbx1q(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    // Signed element type matches the private helper directly.
    vqtbx1(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    vqtbx1q(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t {
    // Sign-agnostic byte shuffle; transmute fallback and table, and the result.
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private binding to the two-table TBX LLVM intrinsic (fallback `a`, two
// table registers, index vector).
fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v8i8"
        )]
        fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbx2(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// 128-bit index/result counterpart (tbx2.v16i8).
fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v16i8"
        )]
        fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbx2q(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t {
    // Unpack the two-register table tuple.
    vqtbx2(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t {
    vqtbx2q(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    // Sign-agnostic byte shuffle; transmute element types around the call.
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
19627#[doc = "Extended table look-up"]
19628#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"]
19629#[inline(always)]
19630#[target_feature(enable = "neon")]
19631#[cfg_attr(test, assert_instr(tbx))]
19632#[stable(feature = "neon_intrinsics", since = "1.59.0")]
19633fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
19634    unsafe extern "unadjusted" {
19635        #[cfg_attr(
19636            any(target_arch = "aarch64", target_arch = "arm64ec"),
19637            link_name = "llvm.aarch64.neon.tbx3.v8i8"
19638        )]
19639        fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t)
19640            -> int8x8_t;
19641    }
19642    unsafe { _vqtbx3(a, b, c, d, e) }
19643}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private raw binding for the 128-bit-result 3-table TBX form (v16i8 variant).
fn vqtbx3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v16i8"
        )]
        fn _vqtbx3q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    // Forward straight to the LLVM intrinsic.
    unsafe { _vqtbx3q(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t {
    // Unpack the 3-vector tuple into individual table registers for the raw helper.
    vqtbx3(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t {
    // Unpack the 3-vector tuple into individual table registers for the raw helper.
    vqtbx3q(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    // u8 vectors are bit-identical to i8 vectors; reinterpret in and out of the
    // signed-typed raw helper. Only `c` is already the expected unsigned type.
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    // Same-layout reinterpret to/from the signed-typed raw helper (128-bit form).
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    // Poly8 shares the byte layout of int8; reinterpret around the raw helper.
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    // Poly8 shares the byte layout of int8; reinterpret around the raw helper (128-bit form).
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private raw binding for the 4-table TBX form: `a` is the fallback vector,
// `b`..`e` are the four 128-bit table registers, `f` holds the byte indices.
fn vqtbx4(
    a: int8x8_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x8_t,
) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v8i8"
        )]
        fn _vqtbx4(
            a: int8x8_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x8_t,
        ) -> int8x8_t;
    }
    // Forward straight to the LLVM `tbx4` intrinsic.
    unsafe { _vqtbx4(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private raw binding for the 128-bit-result 4-table TBX form (v16i8 variant).
fn vqtbx4q(
    a: int8x16_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x16_t,
) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v16i8"
        )]
        fn _vqtbx4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x16_t,
        ) -> int8x16_t;
    }
    // Forward straight to the LLVM intrinsic.
    unsafe { _vqtbx4q(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t {
    // Unpack the 4-vector tuple into individual table registers for the raw helper.
    vqtbx4(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t {
    // Unpack the 4-vector tuple into individual table registers for the raw helper.
    vqtbx4q(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
    // Same-layout reinterpret to/from the signed-typed raw helper.
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
    // Same-layout reinterpret to/from the signed-typed raw helper (128-bit form).
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
    // Poly8 shares the byte layout of int8; reinterpret around the raw helper.
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
    // Poly8 shares the byte layout of int8; reinterpret around the raw helper (128-bit form).
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Rotate and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(rax1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Direct binding to the SHA3 RAX1 crypto intrinsic; requires the `sha3`
    // target feature in addition to `neon` (see attribute above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.rax1"
        )]
        fn _vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vrax1q_u64(a, b) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_s8(a: int8x8_t) -> int8x8_t {
    // Per-lane bit reversal via the generic SIMD bit-reverse; lowers to RBIT.
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_s8(a: int8x16_t) -> int8x16_t {
    // Per-lane bit reversal via the generic SIMD bit-reverse (128-bit form).
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    // Little-endian variant: same-layout reinterpret around the signed version.
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: reverse lane order before and after the call so the
    // per-lane result matches the little-endian lane numbering.
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    // Little-endian variant: same-layout reinterpret around the signed version.
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    // Big-endian variant: reverse all 16 lanes before and after the call so the
    // per-lane result matches the little-endian lane numbering.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    // Little-endian variant: same-layout reinterpret around the signed version.
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    // Big-endian variant: reverse lane order before and after the call so the
    // per-lane result matches the little-endian lane numbering.
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    // Little-endian variant: same-layout reinterpret around the signed version.
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    // Big-endian variant: reverse all 16 lanes before and after the call so the
    // per-lane result matches the little-endian lane numbering.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
    // Direct binding to the FRECPE intrinsic (v1f64 form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v1f64"
        )]
        fn _vrecpe_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrecpe_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpeq_f64(a: float64x2_t) -> float64x2_t {
    // Direct binding to the FRECPE intrinsic (v2f64 form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v2f64"
        )]
        fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrecpeq_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecped_f64(a: f64) -> f64 {
    // Scalar (double-precision) binding to the FRECPE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f64"
        )]
        fn _vrecped_f64(a: f64) -> f64;
    }
    unsafe { _vrecped_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpes_f32(a: f32) -> f32 {
    // Scalar (single-precision) binding to the FRECPE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f32"
        )]
        fn _vrecpes_f32(a: f32) -> f32;
    }
    unsafe { _vrecpes_f32(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecpe))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpeh_f16(a: f16) -> f16 {
    // Scalar (half-precision) binding to the FRECPE intrinsic. Requires the
    // `fp16` target feature and is excluded from arm64ec builds (cfg above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f16"
        )]
        fn _vrecpeh_f16(a: f16) -> f16;
    }
    unsafe { _vrecpeh_f16(a) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Direct binding to the FRECPS intrinsic (v1f64 form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v1f64"
        )]
        fn _vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrecps_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Direct binding to the FRECPS intrinsic (v2f64 form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v2f64"
        )]
        fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrecpsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsd_f64(a: f64, b: f64) -> f64 {
    // Scalar (double-precision) binding to the FRECPS intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f64"
        )]
        fn _vrecpsd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vrecpsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpss_f32(a: f32, b: f32) -> f32 {
    // Scalar (single-precision) binding to the FRECPS intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f32"
        )]
        fn _vrecpss_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vrecpss_f32(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpsh_f16(a: f16, b: f16) -> f16 {
    // Scalar (half-precision) binding to the FRECPS intrinsic. Requires the
    // `fp16` target feature and is excluded from arm64ec builds (cfg above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f16"
        )]
        fn _vrecpsh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vrecpsh_f16(a, b) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxd_f64(a: f64) -> f64 {
    // Scalar (double-precision) binding to the FRECPX intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f64"
        )]
        fn _vrecpxd_f64(a: f64) -> f64;
    }
    unsafe { _vrecpxd_f64(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxs_f32(a: f32) -> f32 {
    // Scalar (single-precision) binding to the FRECPX intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f32"
        )]
        fn _vrecpxs_f32(a: f32) -> f32;
    }
    unsafe { _vrecpxs_f32(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecpx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpxh_f16(a: f16) -> f16 {
    // Scalar (half-precision) binding to the FRECPX intrinsic. Requires the
    // `fp16` target feature and is excluded from arm64ec builds (cfg above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f16"
        )]
        fn _vrecpxh_f16(a: f16) -> f16;
    }
    unsafe { _vrecpxh_f16(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    // Little-endian: a pure bitwise reinterpret (compiles to nothing).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    // Big-endian: reverse the f16 lanes first so the bitwise reinterpret yields
    // the same value as on little-endian; the single-lane f64 result needs no shuffle.
    let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    // Little-endian: a pure bitwise reinterpret (compiles to nothing).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    // Big-endian: reverse the source f16 lanes, reinterpret, then reverse the
    // resulting f64 lanes so both sides use little-endian lane numbering.
    let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    // Little-endian: a pure bitwise reinterpret (compiles to nothing).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    // Big-endian: the single-lane f64 input needs no pre-shuffle; only the
    // resulting f16 lanes are reversed to match little-endian lane numbering.
    unsafe {
        let ret_val: float16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    // Little-endian: a pure bitwise reinterpret (compiles to nothing).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    // Big-endian: reverse the f64 lanes, reinterpret, then reverse the f16
    // lanes so both sides use little-endian lane numbering.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    // Little-endian: a pure bitwise reinterpret of the 128-bit poly scalar.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    // Big-endian: the p128 scalar has no lanes to pre-shuffle; only the
    // resulting f64 lanes are reversed to match little-endian lane numbering.
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    // Little-endian: a pure bitwise reinterpret (compiles to nothing).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    // Big-endian: reverse the f32 lanes first; the single-lane f64 result
    // needs no post-shuffle.
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    // Big-endian: reverse the input lanes before the bit cast so the
    // single-lane result has the expected bit pattern.
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    // Big-endian: reverse input lanes, bit-cast, then reverse the result
    // lanes (the two types have different lane counts, so both sides need it).
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    // Big-endian: reverse input lanes, bit-cast, then reverse the result
    // lanes (lane counts differ between the two types).
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    // Big-endian: the single-lane input needs no shuffle; bit-cast, then
    // reverse the result lanes to match the expected lane numbering.
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    // Big-endian: single-lane input needs no shuffle; bit-cast, then reverse
    // the eight result lanes to match the expected lane numbering.
    unsafe {
        let ret_val: int8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    // Big-endian: single-lane input needs no shuffle; bit-cast, then reverse
    // the four result lanes to match the expected lane numbering.
    unsafe {
        let ret_val: int16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    // Big-endian: single-lane input needs no shuffle; bit-cast, then reverse
    // the two result lanes to match the expected lane numbering.
    unsafe {
        let ret_val: int32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t {
    // Both types are single-lane, so endianness cannot reorder lanes;
    // one definition (no cfg split) covers both byte orders.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    // Big-endian: single-lane input needs no shuffle; bit-cast, then reverse
    // the eight result lanes to match the expected lane numbering.
    unsafe {
        let ret_val: uint8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    // Big-endian: single-lane input needs no shuffle; bit-cast, then reverse
    // the four result lanes to match the expected lane numbering.
    unsafe {
        let ret_val: uint16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    // Big-endian: single-lane input needs no shuffle; bit-cast, then reverse
    // the two result lanes to match the expected lane numbering.
    unsafe {
        let ret_val: uint32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Both types are single-lane, so endianness cannot reorder lanes;
    // one definition (no cfg split) covers both byte orders.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    // Big-endian: single-lane input needs no shuffle; bit-cast, then reverse
    // the eight result lanes to match the expected lane numbering.
    unsafe {
        let ret_val: poly8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    // Big-endian: single-lane input needs no shuffle; bit-cast, then reverse
    // the four result lanes to match the expected lane numbering.
    unsafe {
        let ret_val: poly16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t {
    // Both types are single-lane, so endianness cannot reorder lanes;
    // one definition (no cfg split) covers both byte orders.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    // Big-endian: reverse the two input lanes before bit-casting into the
    // 128-bit scalar; the scalar result itself has no lanes to shuffle.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    // Big-endian: reverse input lanes, bit-cast, then reverse the result
    // lanes (lane counts differ between the two types).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    // Big-endian: reverse input lanes, bit-cast, then reverse all sixteen
    // result lanes (lane counts differ between the two types).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    // Big-endian: reverse input lanes, bit-cast, then reverse the eight
    // result lanes (lane counts differ between the two types).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    // Big-endian: reverse input lanes, bit-cast, then reverse the four
    // result lanes (lane counts differ between the two types).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Big-endian: both types have two 64-bit lanes; the generator still
    // emits the symmetric reverse-cast-reverse pattern it uses throughout.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    // Big-endian: reverse input lanes, bit-cast, then reverse all sixteen
    // result lanes (lane counts differ between the two types).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    // Big-endian: reverse input lanes, bit-cast, then reverse the eight
    // result lanes (lane counts differ between the two types).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    // Big-endian: reverse input lanes, bit-cast, then reverse the four
    // result lanes (lane counts differ between the two types).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Big-endian: both types have two 64-bit lanes; the generator still
    // emits the symmetric reverse-cast-reverse pattern it uses throughout.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    // Big-endian: reverse input lanes, bit-cast, then reverse all sixteen
    // result lanes (lane counts differ between the two types).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(a);
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
    // Big-endian: reverse input lanes, bit-cast, then reverse the eight
    // result lanes (lane counts differ between the two types).
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
    // Big-endian: both types have two 64-bit lanes; the generator still
    // emits the symmetric reverse-cast-reverse pattern it uses throughout.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    // Big-endian: reverse the eight input lanes before the bit cast; the
    // single-lane result needs no shuffle of its own.
    let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    // Big-endian: reverse all sixteen input lanes, bit-cast, then reverse
    // the two result lanes (lane counts differ between the two types).
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    // Little-endian: pure bit cast; lane order already matches.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    // Big-endian: reverse the four input lanes before the bit cast; the
    // single-lane result needs no shuffle of its own.
    let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    // Big-endian: reverse input lanes to the little-endian bit layout, cast,
    // then restore big-endian lane order in the two-lane result.
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    // Big-endian: reverse input lanes before the cast; single-lane result
    // needs no output shuffle.
    let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    // Big-endian: reverse input lanes to the little-endian bit layout, cast,
    // then restore big-endian lane order in the two-lane result.
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t {
    // One 64-bit lane in and out: lane order is identical on both
    // endiannesses, so a single endian-agnostic definition suffices.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t {
    // One 64-bit lane in and out: no lane reordering needed on any
    // endianness.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    // Big-endian: normalize input lane order, cast, then restore big-endian
    // lane order in the result.
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    // Big-endian: normalize input lane order, cast, then restore big-endian
    // lane order in the result.
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    // Big-endian: reverse input lanes before the cast; single-lane result
    // needs no output shuffle.
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    // Big-endian: reverse input lanes to the little-endian bit layout, cast,
    // then restore big-endian lane order in the two-lane result.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    // Big-endian: reverse input lanes before the cast; single-lane result
    // needs no output shuffle.
    let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    // Big-endian: reverse input lanes to the little-endian bit layout, cast,
    // then restore big-endian lane order in the two-lane result.
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    // Big-endian: reverse input lanes before the cast; single-lane result
    // needs no output shuffle.
    let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    // Big-endian: reverse input lanes to the little-endian bit layout, cast,
    // then restore big-endian lane order in the two-lane result.
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t {
    // One 64-bit lane in and out: no lane reordering needed on any
    // endianness.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t {
    // One 64-bit lane in and out: no lane reordering needed on any
    // endianness.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // Big-endian: normalize input lane order, cast, then restore big-endian
    // lane order in the result.
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    // Big-endian: normalize input lane order, cast, then restore big-endian
    // lane order in the result.
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    // Big-endian: reverse input lanes before the cast; single-lane result
    // needs no output shuffle.
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    // Big-endian: reverse input lanes to the little-endian bit layout, cast,
    // then restore big-endian lane order in the two-lane result.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    // Big-endian: reverse input lanes before the cast; single-lane result
    // needs no output shuffle.
    let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    // Big-endian: reverse input lanes to the little-endian bit layout, cast,
    // then restore big-endian lane order in the two-lane result.
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    // Big-endian: the single-lane input needs no shuffle; only the two-lane
    // f32 result must be put back into big-endian lane order.
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t {
    // One 64-bit lane in and out: no lane reordering needed on any
    // endianness.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t {
    // One 64-bit lane in and out: no lane reordering needed on any
    // endianness.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t {
    // One 64-bit lane in and out: no lane reordering needed on any
    // endianness.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    // Big-endian: normalize the two input lanes, cast, then put the four f32
    // lanes of the result back into big-endian order.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    // Big-endian: normalize input lane order, cast, then restore big-endian
    // lane order in the result.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    // Big-endian: normalize input lane order, cast, then restore big-endian
    // lane order in the result.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    // Little-endian: same bits, new lane type — no instruction emitted.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    // Big-endian: normalize input lane order, cast, then restore big-endian
    // lane order in the result.
    let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
    // Thin FFI wrapper: binds directly to the LLVM intrinsic that lowers to
    // the FRINT32X instruction (see assert_instr above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f32"
        )]
        fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32x_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
    // Thin FFI wrapper: binds to the 4-lane f32 form of the LLVM frint32x
    // intrinsic (lowers to FRINT32X, see assert_instr above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v4f32"
        )]
        fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd32xq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t {
    // Thin FFI wrapper: binds to the 2-lane f64 form of the LLVM frint32x
    // intrinsic (lowers to FRINT32X, see assert_instr above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f64"
        )]
        fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd32xq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f64(a: float64x1_t) -> float64x1_t {
    // Unlike the multi-lane variants, this binds the *scalar* f64 LLVM
    // intrinsic (llvm.aarch64.frint32x.f64, no ".neon."): the single lane is
    // extracted, rounded, and the result transmuted back into a float64x1_t.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32x.f64"
        )]
        fn _vrnd32x_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd32x_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
    // Thin FFI wrapper: binds directly to the LLVM intrinsic that lowers to
    // the FRINT32Z instruction (round toward zero; see assert_instr above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f32"
        )]
        fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd32z_f32(a) }
}
21742#[doc = "Floating-point round to 32-bit integer toward zero"]
21743#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"]
21744#[inline(always)]
21745#[target_feature(enable = "neon,frintts")]
21746#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
21747#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
21748pub fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
21749    unsafe extern "unadjusted" {
21750        #[cfg_attr(
21751            any(target_arch = "aarch64", target_arch = "arm64ec"),
21752            link_name = "llvm.aarch64.neon.frint32z.v4f32"
21753        )]
21754        fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t;
21755    }
21756    unsafe { _vrnd32zq_f32(a) }
21757}
21758#[doc = "Floating-point round to 32-bit integer toward zero"]
21759#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"]
21760#[inline(always)]
21761#[target_feature(enable = "neon,frintts")]
21762#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
21763#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
21764pub fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t {
21765    unsafe extern "unadjusted" {
21766        #[cfg_attr(
21767            any(target_arch = "aarch64", target_arch = "arm64ec"),
21768            link_name = "llvm.aarch64.neon.frint32z.v2f64"
21769        )]
21770        fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t;
21771    }
21772    unsafe { _vrnd32zq_f64(a) }
21773}
21774#[doc = "Floating-point round to 32-bit integer toward zero"]
21775#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"]
21776#[inline(always)]
21777#[target_feature(enable = "neon,frintts")]
21778#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
21779#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
21780pub fn vrnd32z_f64(a: float64x1_t) -> float64x1_t {
21781    unsafe extern "unadjusted" {
21782        #[cfg_attr(
21783            any(target_arch = "aarch64", target_arch = "arm64ec"),
21784            link_name = "llvm.aarch64.frint32z.f64"
21785        )]
21786        fn _vrnd32z_f64(a: f64) -> f64;
21787    }
21788    unsafe { transmute(_vrnd32z_f64(simd_extract!(a, 0))) }
21789}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
    // 64-bit (2-lane) vector form; forwards to the LLVM vector intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f32"
        )]
        fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd64x_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
    // 128-bit (4-lane) vector form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v4f32"
        )]
        fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd64xq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t {
    // 128-bit (2-lane f64) vector form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f64"
        )]
        fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd64xq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f64(a: float64x1_t) -> float64x1_t {
    // 1-lane f64: scalar LLVM intrinsic on lane 0, result rebuilt via transmute.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64x.f64"
        )]
        fn _vrnd64x_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd64x_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
    // 64-bit (2-lane) vector form; forwards to the LLVM vector intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f32"
        )]
        fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrnd64z_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
    // 128-bit (4-lane) vector form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v4f32"
        )]
        fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrnd64zq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t {
    // 128-bit (2-lane f64) vector form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f64"
        )]
        fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrnd64zq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
    // 1-lane f64: scalar LLVM intrinsic on lane 0, result rebuilt via transmute.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64z.f64"
        )]
        fn _vrnd64z_f64(a: f64) -> f64;
    }
    unsafe { transmute(_vrnd64z_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f16(a: float16x4_t) -> float16x4_t {
    // simd_trunc = lane-wise round toward zero; codegen is checked by
    // `assert_instr(frintz)` above.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f16(a: float16x8_t) -> float16x8_t {
    // 128-bit (8-lane f16) form of the same lane-wise truncation.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f32(a: float32x2_t) -> float32x2_t {
    // Lane-wise round toward zero (2 x f32).
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f32(a: float32x4_t) -> float32x4_t {
    // Lane-wise round toward zero (4 x f32).
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise round toward zero (1 x f64).
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise round toward zero (2 x f64).
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f16(a: float16x4_t) -> float16x4_t {
    // simd_round = lane-wise round-half-away-from-zero; codegen is checked by
    // `assert_instr(frinta)` above.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t {
    // 128-bit (8-lane f16) form.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f32(a: float32x2_t) -> float32x2_t {
    // Lane-wise ties-away rounding (2 x f32).
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
    // Lane-wise ties-away rounding (4 x f32).
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise ties-away rounding (1 x f64).
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise ties-away rounding (2 x f64).
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndah_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndah_f16(a: f16) -> f16 {
    // Scalar f16 ties-away rounding; codegen checked by `assert_instr(frinta)`.
    roundf16(a)
}
22040#[doc = "Floating-point round to integral, to nearest with ties to away"]
22041#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndh_f16)"]
22042#[inline(always)]
22043#[target_feature(enable = "neon,fp16")]
22044#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
22045#[cfg(not(target_arch = "arm64ec"))]
22046#[cfg_attr(test, assert_instr(frintz))]
22047pub fn vrndh_f16(a: f16) -> f16 {
22048    truncf16(a)
22049}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f16(a: float16x4_t) -> float16x4_t {
    // Binds llvm.nearbyint (current-mode rounding, no inexact exception);
    // codegen is checked by `assert_instr(frinti)` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f16"
        )]
        fn _vrndi_f16(a: float16x4_t) -> float16x4_t;
    }
    unsafe { _vrndi_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f16(a: float16x8_t) -> float16x8_t {
    // 128-bit (8-lane f16) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v8f16"
        )]
        fn _vrndiq_f16(a: float16x8_t) -> float16x8_t;
    }
    unsafe { _vrndiq_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f32(a: float32x2_t) -> float32x2_t {
    // 2 x f32 form of llvm.nearbyint.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f32"
        )]
        fn _vrndi_f32(a: float32x2_t) -> float32x2_t;
    }
    unsafe { _vrndi_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
    // 4 x f32 form of llvm.nearbyint.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f32"
        )]
        fn _vrndiq_f32(a: float32x4_t) -> float32x4_t;
    }
    unsafe { _vrndiq_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f64(a: float64x1_t) -> float64x1_t {
    // 1 x f64 form of llvm.nearbyint (v1f64 exists, so no scalar+transmute here).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v1f64"
        )]
        fn _vrndi_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndi_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
    // 2 x f64 form of llvm.nearbyint.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f64"
        )]
        fn _vrndiq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndiq_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndih_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndih_f16(a: f16) -> f16 {
    // Scalar f16 form of llvm.nearbyint (current-mode rounding).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.f16"
        )]
        fn _vrndih_f16(a: f16) -> f16;
    }
    unsafe { _vrndih_f16(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f16(a: float16x4_t) -> float16x4_t {
    // simd_floor = lane-wise round toward -inf; codegen checked by
    // `assert_instr(frintm)` above.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t {
    // 128-bit (8-lane f16) form.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f32(a: float32x2_t) -> float32x2_t {
    // Lane-wise floor (2 x f32).
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
    // Lane-wise floor (4 x f32).
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise floor (1 x f64).
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise floor (2 x f64).
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmh_f16(a: f16) -> f16 {
    // Scalar f16 floor.
    floorf16(a)
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndn_f64(a: float64x1_t) -> float64x1_t {
    // Binds llvm.roundeven (round-half-to-even); codegen checked by
    // `assert_instr(frintn)` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v1f64"
        )]
        fn _vrndn_f64(a: float64x1_t) -> float64x1_t;
    }
    unsafe { _vrndn_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
    // 128-bit (2-lane f64) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v2f64"
        )]
        fn _vrndnq_f64(a: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrndnq_f64(a) }
}
22263#[doc = "Floating-point round to integral, toward minus infinity"]
22264#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnh_f16)"]
22265#[inline(always)]
22266#[target_feature(enable = "neon,fp16")]
22267#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
22268#[cfg(not(target_arch = "arm64ec"))]
22269#[cfg_attr(test, assert_instr(frintn))]
22270pub fn vrndnh_f16(a: f16) -> f16 {
22271    unsafe extern "unadjusted" {
22272        #[cfg_attr(
22273            any(target_arch = "aarch64", target_arch = "arm64ec"),
22274            link_name = "llvm.roundeven.f16"
22275        )]
22276        fn _vrndnh_f16(a: f16) -> f16;
22277    }
22278    unsafe { _vrndnh_f16(a) }
22279}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndns_f32(a: f32) -> f32 {
    // Scalar f32 form of llvm.roundeven (round-half-to-even).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f32"
        )]
        fn _vrndns_f32(a: f32) -> f32;
    }
    unsafe { _vrndns_f32(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f16(a: float16x4_t) -> float16x4_t {
    // simd_ceil = lane-wise round toward +inf; codegen checked by
    // `assert_instr(frintp)` above.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t {
    // 128-bit (8-lane f16) form.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f32(a: float32x2_t) -> float32x2_t {
    // Lane-wise ceil (2 x f32).
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
    // Lane-wise ceil (4 x f32).
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise ceil (1 x f64).
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise ceil (2 x f64).
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndph_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndph_f16(a: f16) -> f16 {
    // Scalar f16 ceil.
    ceilf16(a)
}
22362#[doc = "Floating-point round to integral exact, using current rounding mode"]
22363#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f16)"]
22364#[inline(always)]
22365#[target_feature(enable = "neon,fp16")]
22366#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
22367#[cfg(not(target_arch = "arm64ec"))]
22368#[cfg_attr(test, assert_instr(frintx))]
22369pub fn vrndx_f16(a: float16x4_t) -> float16x4_t {
22370    unsafe { simd_round_ties_even(a) }
22371}
22372#[doc = "Floating-point round to integral exact, using current rounding mode"]
22373#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f16)"]
22374#[inline(always)]
22375#[target_feature(enable = "neon,fp16")]
22376#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
22377#[cfg(not(target_arch = "arm64ec"))]
22378#[cfg_attr(test, assert_instr(frintx))]
22379pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t {
22380    unsafe { simd_round_ties_even(a) }
22381}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f32(a: float32x2_t) -> float32x2_t {
    // Lane-wise round-ties-even, lowered to FRINTX.
    // NOTE(review): this mapping assumes the default FPCR rounding mode.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
    // Lane-wise round-ties-even, lowered to FRINTX (see vrndx_f32).
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f64(a: float64x1_t) -> float64x1_t {
    // Single-lane round-ties-even, lowered to FRINTX (see vrndx_f32).
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise round-ties-even, lowered to FRINTX (see vrndx_f32).
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxh_f16(a: f16) -> f16 {
    // Scalar f16 round-ties-even, lowered to FRINTX (see vrndx_f32 note).
    round_ties_even_f16(a)
}
#[doc = "Signed rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_s64(a: i64, b: i64) -> i64 {
    // Direct binding to the scalar LLVM rounding-shift-left intrinsic (SRSHL).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.srshl.i64"
        )]
        fn _vrshld_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: the "neon" target feature required by the intrinsic is
    // enabled on this function.
    unsafe { _vrshld_s64(a, b) }
}
#[doc = "Unsigned rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_u64(a: u64, b: i64) -> u64 {
    // Direct binding to the scalar LLVM rounding-shift-left intrinsic (URSHL).
    // Note the shift amount `b` is signed: negative values shift right.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.urshl.i64"
        )]
        fn _vrshld_u64(a: u64, b: i64) -> u64;
    }
    // SAFETY: the "neon" target feature required by the intrinsic is enabled.
    unsafe { _vrshld_u64(a, b) }
}
#[doc = "Signed rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
    // N is validated at compile time to the architectural range 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // A rounding right shift by N is a rounding left shift by -N.
    vrshld_s64(a, -N as i64)
}
#[doc = "Unsigned rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
    // N is validated at compile time to the architectural range 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // A rounding right shift by N is a rounding left shift by -N.
    vrshld_u64(a, -N as i64)
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Shift amount must be within the narrowed element width (1..=8).
    static_assert!(N >= 1 && N <= 8);
    // Rounding-shift-narrow `b` to 8 bits, then concatenate: `a` becomes
    // the low half and the narrowed `b` the high half of the result.
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Shift amount must be within the narrowed element width (1..=16).
    static_assert!(N >= 1 && N <= 16);
    // Concatenate `a` (low half) with the rounding-narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Shift amount must be within the narrowed element width (1..=32).
    static_assert!(N >= 1 && N <= 32);
    // Concatenate `a` (low half) with the rounding-narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Shift amount must be within the narrowed element width (1..=8).
    static_assert!(N >= 1 && N <= 8);
    // Concatenate `a` (low half) with the rounding-narrowed `b` (high half).
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Shift amount must be within the narrowed element width (1..=16).
    static_assert!(N >= 1 && N <= 16);
    // Concatenate `a` (low half) with the rounding-narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Shift amount must be within the narrowed element width (1..=32).
    static_assert!(N >= 1 && N <= 32);
    // Concatenate `a` (low half) with the rounding-narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
    // Direct binding to the LLVM intrinsic selecting FRSQRTE (1 x f64 lane).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v1f64"
        )]
        fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the "neon" target feature required by the intrinsic is enabled.
    unsafe { _vrsqrte_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
    // Direct binding to the LLVM intrinsic selecting FRSQRTE (2 x f64 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v2f64"
        )]
        fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the "neon" target feature required by the intrinsic is enabled.
    unsafe { _vrsqrteq_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrted_f64(a: f64) -> f64 {
    // Direct binding to the scalar-f64 LLVM intrinsic selecting FRSQRTE.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f64"
        )]
        fn _vrsqrted_f64(a: f64) -> f64;
    }
    // SAFETY: the "neon" target feature required by the intrinsic is enabled.
    unsafe { _vrsqrted_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtes_f32(a: f32) -> f32 {
    // Direct binding to the scalar-f32 LLVM intrinsic selecting FRSQRTE.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f32"
        )]
        fn _vrsqrtes_f32(a: f32) -> f32;
    }
    // SAFETY: the "neon" target feature required by the intrinsic is enabled.
    unsafe { _vrsqrtes_f32(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frsqrte))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrsqrteh_f16(a: f16) -> f16 {
    // Direct binding to the scalar-f16 LLVM intrinsic selecting FRSQRTE.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.f16"
        )]
        fn _vrsqrteh_f16(a: f16) -> f16;
    }
    // SAFETY: the "neon,fp16" target features required by the intrinsic are enabled.
    unsafe { _vrsqrteh_f16(a) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Direct binding to the LLVM Newton-step intrinsic selecting FRSQRTS
    // (1 x f64 lane); used to refine an FRSQRTE estimate.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v1f64"
        )]
        fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: the "neon" target feature required by the intrinsic is enabled.
    unsafe { _vrsqrts_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Direct binding to the LLVM intrinsic selecting FRSQRTS (2 x f64 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v2f64"
        )]
        fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the "neon" target feature required by the intrinsic is enabled.
    unsafe { _vrsqrtsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
    // Direct binding to the scalar-f64 LLVM intrinsic selecting FRSQRTS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f64"
        )]
        fn _vrsqrtsd_f64(a: f64, b: f64) -> f64;
    }
    // SAFETY: the "neon" target feature required by the intrinsic is enabled.
    unsafe { _vrsqrtsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
    // Direct binding to the scalar-f32 LLVM intrinsic selecting FRSQRTS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f32"
        )]
        fn _vrsqrtss_f32(a: f32, b: f32) -> f32;
    }
    // SAFETY: the "neon" target feature required by the intrinsic is enabled.
    unsafe { _vrsqrtss_f32(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrsqrtsh_f16(a: f16, b: f16) -> f16 {
    // Direct binding to the scalar-f16 LLVM intrinsic selecting FRSQRTS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f16"
        )]
        fn _vrsqrtsh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: the "neon,fp16" target features required by the intrinsic are enabled.
    unsafe { _vrsqrtsh_f16(a, b) }
}
#[doc = "Signed rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    // N is validated at compile time to the architectural range 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Rounding shift `b` right by N, then accumulate into `a` with
    // wrapping (modular) addition, matching the instruction's behavior.
    let b: i64 = vrshrd_n_s64::<N>(b);
    a.wrapping_add(b)
}
#[doc = "Unsigned rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    // N is validated at compile time to the architectural range 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // Rounding shift `b` right by N, then accumulate into `a` with
    // wrapping (modular) addition, matching the instruction's behavior.
    let b: u64 = vrshrd_n_u64::<N>(b);
    a.wrapping_add(b)
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    // Little-endian variant: narrow (b - c) with rounding, then
    // concatenate `a` (low half) with the narrowed result (high half).
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    // Little-endian variant: narrow (b - c) with rounding, then
    // concatenate `a` (low half) with the narrowed result (high half).
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    // Little-endian variant: narrow (b - c) with rounding, then
    // concatenate `a` (low half) with the narrowed result (high half).
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    // Little-endian variant: narrow (b - c) with rounding, then
    // concatenate `a` (low half) with the narrowed result (high half).
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    // Little-endian variant: narrow (b - c) with rounding, then
    // concatenate `a` (low half) with the narrowed result (high half).
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    // Little-endian variant: narrow (b - c) with rounding, then
    // concatenate `a` (low half) with the narrowed result (high half).
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    // Big-endian variant of the same operation; only the asserted
    // instruction differs (rsubhn instead of rsubhn2).
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    // Big-endian variant of the same operation (see the s16 version).
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    // Big-endian variant of the same operation (see the s16 version).
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    // Big-endian variant of the same operation (see the s16 version).
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    // Big-endian variant of the same operation (see the s16 version).
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    // Big-endian variant of the same operation (see the s16 version).
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f16)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t {
    // Direct binding to the LLVM intrinsic selecting FSCALE (4 x f16 lanes),
    // scaling each lane of `vn` by the per-lane exponent adjustment in `vm`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v4f16"
        )]
        fn _vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t;
    }
    // SAFETY: the "neon,fp8" target features required by the intrinsic are enabled.
    unsafe { _vscale_f16(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f16)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t {
    // Direct binding to the LLVM intrinsic selecting FSCALE (8 x f16 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v8f16"
        )]
        fn _vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t;
    }
    // SAFETY: the "neon,fp8" target features required by the intrinsic are enabled.
    unsafe { _vscaleq_f16(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f32)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t {
    // Direct binding to the LLVM intrinsic selecting FSCALE (2 x f32 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v2f32"
        )]
        fn _vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t;
    }
    // SAFETY: the "neon,fp8" target features required by the intrinsic are enabled.
    unsafe { _vscale_f32(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f32)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t {
    // Direct binding to the LLVM intrinsic selecting FSCALE (4 x f32 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v4f32"
        )]
        fn _vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t;
    }
    // SAFETY: the "neon,fp8" target features required by the intrinsic are enabled.
    unsafe { _vscaleq_f32(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f64)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t {
    // Direct binding to the LLVM intrinsic selecting FSCALE (2 x f64 lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v2f64"
        )]
        fn _vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t;
    }
    // SAFETY: the "neon,fp8" target features required by the intrinsic are enabled.
    unsafe { _vscaleq_f64(vn, vm) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
    // A 1-lane vector has only lane 0; enforced at compile time.
    static_assert!(LANE == 0);
    // Pure lane insertion — no instruction is needed (assert_instr is `nop`).
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
    // LANE must fit in 1 bit (0 or 1) for the 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    // Pure lane insertion — no instruction is needed (assert_instr is `nop`).
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "SHA512 hash update part 2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h2))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Direct binding to the LLVM crypto intrinsic selecting SHA512H2.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h2"
        )]
        fn _vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the "neon,sha3" target features required by the intrinsic are enabled.
    unsafe { _vsha512h2q_u64(a, b, c) }
}
#[doc = "SHA512 hash update part 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Direct binding to the LLVM crypto intrinsic selecting SHA512H.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h"
        )]
        fn _vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the "neon,sha3" target features required by the intrinsic are enabled.
    unsafe { _vsha512hq_u64(a, b, c) }
}
#[doc = "SHA512 schedule update 0"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su0))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Direct binding to the LLVM crypto intrinsic selecting SHA512SU0.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su0"
        )]
        fn _vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the "neon,sha3" target features required by the intrinsic are enabled.
    unsafe { _vsha512su0q_u64(a, b) }
}
#[doc = "SHA512 schedule update 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Direct binding to the LLVM crypto intrinsic selecting SHA512SU1.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su1"
        )]
        fn _vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the "neon,sha3" target features required by the intrinsic are enabled.
    unsafe { _vsha512su1q_u64(a, b, c) }
}
#[doc = "Signed Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_s64(a: i64, b: i64) -> i64 {
    // Scalar form routed through the one-lane vector intrinsic; the
    // transmutes reinterpret i64 <-> int64x1_t (same size, same bits).
    unsafe { transmute(vshl_s64(transmute(a), transmute(b))) }
}
#[doc = "Unsigned Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_u64(a: u64, b: i64) -> u64 {
    // Scalar form routed through the one-lane vector intrinsic; the
    // transmutes reinterpret u64/i64 <-> one-lane vectors bit-for-bit.
    unsafe { transmute(vshl_u64(transmute(a), transmute(b))) }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
    // Shift amount may equal the element width (0..=8) for the "long" form.
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        // Select the upper 8 lanes of `a`, then widen-shift them.
        let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_s8::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
    // Shift amount may equal the element width (0..=16) for the "long" form.
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        // Select the upper 4 lanes of `a`, then widen-shift them.
        let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_s16::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
    // Shift amount may equal the element width (0..=32) for the "long" form.
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        // Select the upper 2 lanes of `a`, then widen-shift them.
        let b: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_s32::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
    // Shift amount may equal the element width (0..=8) for the "long" form.
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        // Select the upper 8 lanes of `a`, then widen-shift them.
        let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_u8::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
    // Shift amount may equal the element width (0..=16) for the "long" form.
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        // Select the upper 4 lanes of `a`, then widen-shift them.
        let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_u16::<N>(b)
    }
}
#[doc = "Unsigned shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
    // Shift amount may equal the element width (0..=32) for the "long" form.
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        // Select the upper 2 lanes of `a`, then widen-shift them.
        let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_u32::<N>(b)
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Narrowing shifts require at least 1 and at most element-width bits.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Concatenate `a` (low half) with the narrowed `b` (high half).
        simd_shuffle!(
            a,
            vshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Narrowing shifts require at least 1 and at most element-width bits.
    static_assert!(N >= 1 && N <= 16);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Narrowing shifts require at least 1 and at most element-width bits.
    static_assert!(N >= 1 && N <= 32);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Narrowing shifts require at least 1 and at most element-width bits.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Concatenate `a` (low half) with the narrowed `b` (high half).
        simd_shuffle!(
            a,
            vshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Narrowing shifts require at least 1 and at most element-width bits.
    static_assert!(N >= 1 && N <= 16);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Narrowing shifts require at least 1 and at most element-width bits.
    static_assert!(N >= 1 && N <= 32);
    // Concatenate `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Insert-shift amount must fit in 3 bits (0..=7) for 8-bit elements.
    static_assert_uimm_bits!(N, 3);
    // Raw FFI binding to the backing LLVM intrinsic (lowers to SLI).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i8"
        )]
        fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
    }
    // SAFETY: N is range-checked above; `neon` is enabled on this function.
    unsafe { _vsli_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Insert-shift amount must fit in 3 bits (0..=7) for 8-bit elements.
    static_assert_uimm_bits!(N, 3);
    // Raw FFI binding to the backing LLVM intrinsic (lowers to SLI).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v16i8"
        )]
        fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
    }
    // SAFETY: N is range-checked above; `neon` is enabled on this function.
    unsafe { _vsliq_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // Insert-shift amount must fit in 4 bits (0..=15) for 16-bit elements.
    static_assert_uimm_bits!(N, 4);
    // Raw FFI binding to the backing LLVM intrinsic (lowers to SLI).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i16"
        )]
        fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
    }
    // SAFETY: N is range-checked above; `neon` is enabled on this function.
    unsafe { _vsli_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Insert-shift amount must fit in 4 bits (0..=15) for 16-bit elements.
    static_assert_uimm_bits!(N, 4);
    // Raw FFI binding to the backing LLVM intrinsic (lowers to SLI).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i16"
        )]
        fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
    }
    // SAFETY: N is range-checked above; `neon` is enabled on this function.
    unsafe { _vsliq_n_s16(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Insert-shift amount is 0..=31 for 32-bit elements.
    static_assert!(N >= 0 && N <= 31);
    // Raw FFI binding to the backing LLVM intrinsic (lowers to SLI).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i32"
        )]
        fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
    }
    // SAFETY: N is range-checked above; `neon` is enabled on this function.
    unsafe { _vsli_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Insert-shift amount is 0..=31 for 32-bit elements.
    static_assert!(N >= 0 && N <= 31);
    // Raw FFI binding to the backing LLVM intrinsic (lowers to SLI).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i32"
        )]
        fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
    }
    // SAFETY: N is range-checked above; `neon` is enabled on this function.
    unsafe { _vsliq_n_s32(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    // Insert-shift amount is 0..=63 for 64-bit elements.
    static_assert!(N >= 0 && N <= 63);
    // Raw FFI binding to the backing LLVM intrinsic (lowers to SLI).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v1i64"
        )]
        fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
    }
    // SAFETY: N is range-checked above; `neon` is enabled on this function.
    unsafe { _vsli_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Insert-shift amount is 0..=63 for 64-bit elements.
    static_assert!(N >= 0 && N <= 63);
    // Raw FFI binding to the backing LLVM intrinsic (lowers to SLI).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v2i64"
        )]
        fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
    }
    // SAFETY: N is range-checked above; `neon` is enabled on this function.
    unsafe { _vsliq_n_s64(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert_uimm_bits!(N, 3);
    // SLI is a bitwise operation, so the unsigned form reuses the signed
    // intrinsic via same-size bit reinterpretation.
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert_uimm_bits!(N, 3);
    // SLI is a bitwise operation, so the unsigned form reuses the signed
    // intrinsic via same-size bit reinterpretation.
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert_uimm_bits!(N, 4);
    // SLI is a bitwise operation, so the unsigned form reuses the signed
    // intrinsic via same-size bit reinterpretation.
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert_uimm_bits!(N, 4);
    // SLI is a bitwise operation, so the unsigned form reuses the signed
    // intrinsic via same-size bit reinterpretation.
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert!(N >= 0 && N <= 31);
    // SLI is a bitwise operation, so the unsigned form reuses the signed
    // intrinsic via same-size bit reinterpretation.
    unsafe { transmute(vsli_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert!(N >= 0 && N <= 31);
    // SLI is a bitwise operation, so the unsigned form reuses the signed
    // intrinsic via same-size bit reinterpretation.
    unsafe { transmute(vsliq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    static_assert!(N >= 0 && N <= 63);
    // SLI is a bitwise operation, so the unsigned form reuses the signed
    // intrinsic via same-size bit reinterpretation.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert!(N >= 0 && N <= 63);
    // SLI is a bitwise operation, so the unsigned form reuses the signed
    // intrinsic via same-size bit reinterpretation.
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert_uimm_bits!(N, 3);
    // SLI is a bitwise operation, so the polynomial form reuses the signed
    // intrinsic via same-size bit reinterpretation.
    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    static_assert_uimm_bits!(N, 3);
    // SLI is a bitwise operation, so the polynomial form reuses the signed
    // intrinsic via same-size bit reinterpretation.
    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    static_assert_uimm_bits!(N, 4);
    // SLI is a bitwise operation, so the polynomial form reuses the signed
    // intrinsic via same-size bit reinterpretation.
    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    static_assert_uimm_bits!(N, 4);
    // SLI is a bitwise operation, so the polynomial form reuses the signed
    // intrinsic via same-size bit reinterpretation.
    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    static_assert!(N >= 0 && N <= 63);
    // SLI is a bitwise operation, so the poly64 form reuses the signed
    // intrinsic via same-size bit reinterpretation. The `aes` feature gates
    // the poly64 types, not the SLI instruction itself.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert!(N >= 0 && N <= 63);
    // SLI is a bitwise operation, so the poly64 form reuses the signed
    // intrinsic via same-size bit reinterpretation. The `aes` feature gates
    // the poly64 types, not the SLI instruction itself.
    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 0 && N <= 63);
    // Scalar variant built on the one-lane vector intrinsic; transmute
    // reinterprets i64 <-> int64x1_t bit-for-bit.
    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
pub fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 0 && N <= 63);
    // Scalar variant built on the one-lane vector intrinsic; transmute
    // reinterprets u64 <-> uint64x1_t bit-for-bit.
    unsafe { transmute(vsli_n_u64::<N>(transmute(a), transmute(b))) }
}
#[doc = "SM3PARTW1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Raw FFI binding to the backing LLVM intrinsic (lowers to SM3PARTW1).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3partw1"
        )]
        fn _vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the `sm4` target feature is enabled on this function.
    unsafe { _vsm3partw1q_u32(a, b, c) }
}
#[doc = "SM3PARTW2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3partw2))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Raw FFI binding to the backing LLVM intrinsic (lowers to SM3PARTW2).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3partw2"
        )]
        fn _vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the `sm4` target feature is enabled on this function.
    unsafe { _vsm3partw2q_u32(a, b, c) }
}
#[doc = "SM3SS1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3ss1))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Raw FFI binding to the backing LLVM intrinsic (lowers to SM3SS1).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3ss1"
        )]
        fn _vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the `sm4` target feature is enabled on this function.
    unsafe { _vsm3ss1q_u32(a, b, c) }
}
#[doc = "SM3TT1A"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt1aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Lane index immediate is a 2-bit value (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    // Raw FFI binding to the backing LLVM intrinsic (lowers to SM3TT1A).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1a"
        )]
        fn _vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // SAFETY: IMM2 is range-checked above and widened to the i64 the LLVM
    // intrinsic expects; `sm4` is enabled on this function.
    unsafe { _vsm3tt1aq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT1B"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt1bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Lane index immediate is a 2-bit value (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    // Raw FFI binding to the backing LLVM intrinsic (lowers to SM3TT1B).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1b"
        )]
        fn _vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // SAFETY: IMM2 is range-checked above and widened to the i64 the LLVM
    // intrinsic expects; `sm4` is enabled on this function.
    unsafe { _vsm3tt1bq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT2A"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt2aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Lane index immediate is a 2-bit value (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    // Raw FFI binding to the backing LLVM intrinsic (lowers to SM3TT2A).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2a"
        )]
        fn _vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // SAFETY: IMM2 is range-checked above and widened to the i64 the LLVM
    // intrinsic expects; `sm4` is enabled on this function.
    unsafe { _vsm3tt2aq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM3TT2B"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm3tt2bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Lane index immediate is a 2-bit value (0..=3).
    static_assert_uimm_bits!(IMM2, 2);
    // Raw FFI binding to the backing LLVM intrinsic (lowers to SM3TT2B).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2b"
        )]
        fn _vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
    }
    // SAFETY: IMM2 is range-checked above and widened to the i64 the LLVM
    // intrinsic expects; `sm4` is enabled on this function.
    unsafe { _vsm3tt2bq_u32(a, b, c, IMM2 as i64) }
}
#[doc = "SM4 key"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4ekey))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Raw FFI binding to the backing LLVM intrinsic (lowers to SM4EKEY).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm4ekey"
        )]
        fn _vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the `sm4` target feature is enabled on this function.
    unsafe { _vsm4ekeyq_u32(a, b) }
}
#[doc = "SM4 encode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm4e))]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Binding to the LLVM intrinsic that lowers to the SM4E instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm4e"
        )]
        fn _vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: value-based intrinsic with no memory access; required `sm4`
    // target feature is enabled on this function.
    unsafe { _vsm4eq_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
    // Binding to the LLVM intrinsic that lowers to the USQADD instruction
    // (per-lane, via the `.v8i8` vector-typed variant).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v8i8"
        )]
        fn _vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
    }
    // SAFETY: value-based intrinsic with no memory access; the `neon` target
    // feature it requires is enabled on this function.
    unsafe { _vsqadd_u8(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v16i8"
        )]
        fn _vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
    }
    // SAFETY: value-based intrinsic; no memory access.
    unsafe { _vsqaddq_u8(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v4i16"
        )]
        fn _vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
    }
    // SAFETY: value-based intrinsic; no memory access.
    unsafe { _vsqadd_u16(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v8i16"
        )]
        fn _vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
    }
    // SAFETY: value-based intrinsic; no memory access.
    unsafe { _vsqaddq_u16(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v2i32"
        )]
        fn _vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
    }
    // SAFETY: value-based intrinsic; no memory access.
    unsafe { _vsqadd_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v4i32"
        )]
        fn _vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
    }
    // SAFETY: value-based intrinsic; no memory access.
    unsafe { _vsqaddq_u32(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v1i64"
        )]
        fn _vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
    }
    // SAFETY: value-based intrinsic; no memory access.
    unsafe { _vsqadd_u64(a, b) }
}
#[doc = "Unsigned saturating Accumulate of Signed value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.v2i64"
        )]
        fn _vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
    }
    // SAFETY: value-based intrinsic; no memory access.
    unsafe { _vsqaddq_u64(a, b) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddb_u8(a: u8, b: i8) -> u8 {
    // Broadcast both scalars into vectors, reuse the vector USQADD, then
    // read back lane 0. SAFETY: lane index 0 is always in bounds.
    unsafe { simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddh_u16(a: u16, b: i16) -> u16 {
    // Same broadcast/extract pattern as `vsqaddb_u8`, for 16-bit lanes.
    unsafe { simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqaddd_u64(a: u64, b: i64) -> u64 {
    // Scalar (`.i64`) form of the USQADD intrinsic, unlike the vector
    // variants above which use `.vNiM` names.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.i64"
        )]
        fn _vsqaddd_u64(a: u64, b: i64) -> u64;
    }
    // SAFETY: value-based intrinsic; no memory access.
    unsafe { _vsqaddd_u64(a, b) }
}
#[doc = "Unsigned saturating accumulate of signed value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqadds_u32(a: u32, b: i32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.usqadd.i32"
        )]
        fn _vsqadds_u32(a: u32, b: i32) -> u32;
    }
    // SAFETY: value-based intrinsic; no memory access.
    unsafe { _vsqadds_u32(a, b) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vsqrt_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: lane-wise square root on a SIMD value; no memory access.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vsqrtq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: lane-wise square root on a SIMD value; no memory access.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: lane-wise square root on a SIMD value; no memory access.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: lane-wise square root on a SIMD value; no memory access.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: lane-wise square root on a SIMD value; no memory access.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: lane-wise square root on a SIMD value; no memory access.
    unsafe { simd_fsqrt(a) }
}
23930#[doc = "Floating-point round to integral, using current rounding mode"]
23931#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrth_f16)"]
23932#[inline(always)]
23933#[target_feature(enable = "neon,fp16")]
23934#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
23935#[cfg(not(target_arch = "arm64ec"))]
23936#[cfg_attr(test, assert_instr(fsqrt))]
23937pub fn vsqrth_f16(a: f16) -> f16 {
23938    sqrtf16(a)
23939}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // SRI's shift immediate ranges from 1 to the lane width in bits.
    static_assert!(N >= 1 && N <= 8);
    // Delegates to the shared helper macro in the parent module, invoked with
    // the unsigned lane type, the lane count, and the shift amount.
    // SAFETY: operates on SIMD values only — presumably no memory access;
    // see the macro definition in `super` to confirm.
    unsafe { super::shift_right_and_insert!(u8, 8, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    unsafe { super::shift_right_and_insert!(u8, 16, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { super::shift_right_and_insert!(u16, 4, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    unsafe { super::shift_right_and_insert!(u16, 8, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { super::shift_right_and_insert!(u32, 2, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    unsafe { super::shift_right_and_insert!(u32, 4, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe { super::shift_right_and_insert!(u64, 1, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    static_assert!(N >= 1 && N <= 64);
    unsafe { super::shift_right_and_insert!(u64, 2, N, a, b) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert!(N >= 1 && N <= 8);
    // Unsigned variant reuses the signed implementation: SRI works on raw
    // bit patterns, so a same-size transmute round-trip is sufficient.
    // SAFETY: both transmutes are between vector types of identical size.
    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: same-size transmute round-trip to the signed implementation.
    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: same-size transmute round-trip to the signed implementation.
    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: same-size transmute round-trip to the signed implementation.
    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: same-size transmute round-trip to the signed implementation.
    unsafe { transmute(vsri_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: same-size transmute round-trip to the signed implementation.
    unsafe { transmute(vsriq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: same-size transmute round-trip to the signed implementation.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: same-size transmute round-trip to the signed implementation.
    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert!(N >= 1 && N <= 8);
    // Polynomial variants reuse the signed implementation via same-size
    // transmutes, like the unsigned variants above.
    // SAFETY: transmutes between vector types of identical size.
    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: same-size transmute round-trip to the signed implementation.
    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: same-size transmute round-trip to the signed implementation.
    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: same-size transmute round-trip to the signed implementation.
    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    static_assert!(N >= 1 && N <= 64);
    // Note: the poly64 variants additionally require the `aes` feature,
    // which gates the poly64 types.
    // SAFETY: same-size transmute round-trip to the signed implementation.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: same-size transmute round-trip to the signed implementation.
    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(bfxil, N = 2))]
pub fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    // Scalar form: round-trips through the 1-lane vector implementation.
    // SAFETY: i64 and int64x1_t have identical size, so the transmutes are
    // value-preserving.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(bfxil, N = 2))]
pub fn vsrid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: u64 and uint64x1_t have identical size; same-size transmutes.
    unsafe { transmute(vsri_n_u64::<N>(transmute(a), transmute(b))) }
}
24204#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
24205#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16)"]
24206#[doc = "## Safety"]
24207#[doc = "  * Neon intrinsic unsafe"]
24208#[inline(always)]
24209#[target_feature(enable = "neon,fp16")]
24210#[cfg_attr(test, assert_instr(str))]
24211#[allow(clippy::cast_ptr_alignment)]
24212#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
24213#[cfg(not(target_arch = "arm64ec"))]
24214pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) {
24215    crate::ptr::write_unaligned(ptr.cast(), a)
24216}
24217#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
24218#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16)"]
24219#[doc = "## Safety"]
24220#[doc = "  * Neon intrinsic unsafe"]
24221#[inline(always)]
24222#[target_feature(enable = "neon,fp16")]
24223#[cfg_attr(test, assert_instr(str))]
24224#[allow(clippy::cast_ptr_alignment)]
24225#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
24226#[cfg(not(target_arch = "arm64ec"))]
24227pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) {
24228    crate::ptr::write_unaligned(ptr.cast(), a)
24229}
24230#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
24231#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"]
24232#[doc = "## Safety"]
24233#[doc = "  * Neon intrinsic unsafe"]
24234#[inline(always)]
24235#[target_feature(enable = "neon")]
24236#[cfg_attr(test, assert_instr(str))]
24237#[allow(clippy::cast_ptr_alignment)]
24238#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24239pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) {
24240    crate::ptr::write_unaligned(ptr.cast(), a)
24241}
24242#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
24243#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"]
24244#[doc = "## Safety"]
24245#[doc = "  * Neon intrinsic unsafe"]
24246#[inline(always)]
24247#[target_feature(enable = "neon")]
24248#[cfg_attr(test, assert_instr(str))]
24249#[allow(clippy::cast_ptr_alignment)]
24250#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24251pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) {
24252    crate::ptr::write_unaligned(ptr.cast(), a)
24253}
24254#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
24255#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"]
24256#[doc = "## Safety"]
24257#[doc = "  * Neon intrinsic unsafe"]
24258#[inline(always)]
24259#[target_feature(enable = "neon")]
24260#[cfg_attr(test, assert_instr(str))]
24261#[allow(clippy::cast_ptr_alignment)]
24262#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24263pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) {
24264    crate::ptr::write_unaligned(ptr.cast(), a)
24265}
24266#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
24267#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"]
24268#[doc = "## Safety"]
24269#[doc = "  * Neon intrinsic unsafe"]
24270#[inline(always)]
24271#[target_feature(enable = "neon")]
24272#[cfg_attr(test, assert_instr(str))]
24273#[allow(clippy::cast_ptr_alignment)]
24274#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24275pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) {
24276    crate::ptr::write_unaligned(ptr.cast(), a)
24277}
24278#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
24279#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"]
24280#[doc = "## Safety"]
24281#[doc = "  * Neon intrinsic unsafe"]
24282#[inline(always)]
24283#[target_feature(enable = "neon")]
24284#[cfg_attr(test, assert_instr(str))]
24285#[allow(clippy::cast_ptr_alignment)]
24286#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24287pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) {
24288    crate::ptr::write_unaligned(ptr.cast(), a)
24289}
24290#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
24291#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"]
24292#[doc = "## Safety"]
24293#[doc = "  * Neon intrinsic unsafe"]
24294#[inline(always)]
24295#[target_feature(enable = "neon")]
24296#[cfg_attr(test, assert_instr(str))]
24297#[allow(clippy::cast_ptr_alignment)]
24298#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24299pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) {
24300    crate::ptr::write_unaligned(ptr.cast(), a)
24301}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) {
    // Whole-vector unaligned store; the cast turns *mut i16 into *mut int16x4_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) {
    // Whole-vector unaligned store; the cast turns *mut i16 into *mut int16x8_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) {
    // Whole-vector unaligned store; the cast turns *mut i32 into *mut int32x2_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) {
    // Whole-vector unaligned store; the cast turns *mut i32 into *mut int32x4_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) {
    // Whole-vector unaligned store; the cast turns *mut i64 into *mut int64x1_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) {
    // Whole-vector unaligned store; the cast turns *mut i64 into *mut int64x2_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) {
    // Whole-vector unaligned store; the cast turns *mut u8 into *mut uint8x8_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) {
    // Whole-vector unaligned store; the cast turns *mut u8 into *mut uint8x16_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) {
    // Whole-vector unaligned store; the cast turns *mut u16 into *mut uint16x4_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) {
    // Whole-vector unaligned store; the cast turns *mut u16 into *mut uint16x8_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) {
    // Whole-vector unaligned store; the cast turns *mut u32 into *mut uint32x2_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) {
    // Whole-vector unaligned store; the cast turns *mut u32 into *mut uint32x4_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) {
    // Whole-vector unaligned store; the cast turns *mut u64 into *mut uint64x1_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) {
    // Whole-vector unaligned store; the cast turns *mut u64 into *mut uint64x2_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) {
    // Whole-vector unaligned store; the cast turns *mut p8 into *mut poly8x8_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) {
    // Whole-vector unaligned store; the cast turns *mut p8 into *mut poly8x16_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) {
    // Whole-vector unaligned store; the cast turns *mut p16 into *mut poly16x4_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) {
    // Whole-vector unaligned store; the cast turns *mut p16 into *mut poly16x8_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
// NOTE: unlike the other vst1 variants this also enables "aes" —
// presumably because the poly64 types are gated on it; confirm against
// the other p64 intrinsics in this file.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) {
    // Whole-vector unaligned store; the cast turns *mut p64 into *mut poly64x1_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
// "aes" is enabled alongside "neon", matching vst1_p64 above.
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) {
    // Whole-vector unaligned store; the cast turns *mut p64 into *mut poly64x2_t.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) {
    // Declaration of the LLVM ST1 (x2) intrinsic for 1-lane f64 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v1f64.p0"
        )]
        fn _vst1_f64_x2(a: float64x1_t, b: float64x1_t, ptr: *mut f64);
    }
    // Unpack the vector pair and forward to the intrinsic (pointer goes last).
    _vst1_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) {
    // Declaration of the LLVM ST1 (x2) intrinsic for 2-lane f64 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v2f64.p0"
        )]
        fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64);
    }
    // Unpack the vector pair and forward to the intrinsic (pointer goes last).
    _vst1q_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) {
    // Declaration of the LLVM ST1 (x3) intrinsic for 1-lane f64 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v1f64.p0"
        )]
        fn _vst1_f64_x3(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64);
    }
    // Unpack the vector triple and forward to the intrinsic (pointer goes last).
    _vst1_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) {
    // Declaration of the LLVM ST1 (x3) intrinsic for 2-lane f64 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v2f64.p0"
        )]
        fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64);
    }
    // Unpack the vector triple and forward to the intrinsic (pointer goes last).
    _vst1q_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) {
    // Declaration of the LLVM ST1 (x4) intrinsic for 1-lane f64 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v1f64.p0"
        )]
        fn _vst1_f64_x4(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            ptr: *mut f64,
        );
    }
    // Unpack all four vectors and forward to the intrinsic (pointer goes last).
    _vst1_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) {
    // Declaration of the LLVM ST1 (x4) intrinsic for 2-lane f64 vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v2f64.p0"
        )]
        fn _vst1q_f64_x4(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            ptr: *mut f64,
        );
    }
    // Unpack all four vectors and forward to the intrinsic (pointer goes last).
    _vst1q_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
    // One-element vector: LANE 0 is the only valid index (checked at compile time).
    static_assert!(LANE == 0);
    // Extract the selected lane and store it as a plain scalar write.
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2_t) {
    // Two-element vector: LANE must fit in 1 bit, i.e. 0 or 1.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane and store it as a plain scalar write.
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(stp))]
pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) {
    // Each vector holds a single f64, so no interleaving is needed: writing
    // the whole float64x1x2_t struct emits the two doubles contiguously
    // (codegen-tested above as a single `stp`).
    core::ptr::write_unaligned(a.cast(), b)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
    // One-element vectors: LANE 0 is the only valid index (compile-time check).
    static_assert!(LANE == 0);
    // Declaration of the LLVM ST2 (single-structure) lane intrinsic; it takes
    // the lane index as i64 and an untyped byte pointer.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1f64.p0"
        )]
        fn _vst2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8);
    }
    _vst2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
    // One-element vectors: LANE 0 is the only valid index (compile-time check).
    static_assert!(LANE == 0);
    // Declaration of the LLVM ST2 lane intrinsic (lane index as i64, byte pointer).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1i64.p0"
        )]
        fn _vst2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8);
    }
    _vst2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
    static_assert!(LANE == 0);
    // p64 and i64 share the same bit layout: delegate to the signed
    // variant by reinterpreting pointer and vector tuple.
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
    static_assert!(LANE == 0);
    // u64 and i64 share the same bit layout: delegate to the signed
    // variant by reinterpreting pointer and vector tuple.
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) {
    // Crate-local macro performing the interleaved 2-structure store;
    // arguments appear to be (element type, lanes, struct size, ptr, data) —
    // see the macro definition in core_arch::macros for exact semantics.
    crate::core_arch::macros::interleaving_store!(f64, 2, 2, a, b)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) {
    // Crate-local macro performing the interleaved 2-structure store;
    // same pattern as vst2q_f64 — see core_arch::macros for exact semantics.
    crate::core_arch::macros::interleaving_store!(i64, 2, 2, a, b)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) {
    // Two-element vectors: LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the LLVM ST2 lane intrinsic (lane index as i64, byte pointer).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2f64.p0"
        )]
        fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
    // Sixteen-element vectors: LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // Declaration of the LLVM ST2 lane intrinsic (lane index as i64, byte pointer).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v16i8.p0"
        )]
        fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
    // Two-element vectors: LANE must fit in 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    // Declaration of the LLVM ST2 lane intrinsic (lane index as i64, byte pointer).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2i64.p0"
        )]
        fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    // p64 and i64 share the same bit layout: delegate to the signed variant.
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
    static_assert_uimm_bits!(LANE, 4);
    // u8 and i8 share the same bit layout: delegate to the signed variant.
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    // u64 and i64 share the same bit layout: delegate to the signed variant.
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
    static_assert_uimm_bits!(LANE, 4);
    // p8 and i8 share the same bit layout: delegate to the signed variant.
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) {
    // p64 and i64 share the same bit layout: delegate to the signed variant.
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) {
    // u64 and i64 share the same bit layout: delegate to the signed variant.
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) {
    // Each vector holds a single f64, so no interleaving is needed: writing
    // the whole float64x1x3_t struct emits the three doubles contiguously.
    core::ptr::write_unaligned(a.cast(), b)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
    // One-element vectors: LANE 0 is the only valid index (compile-time check).
    static_assert!(LANE == 0);
    // Declaration of the LLVM ST3 lane intrinsic (lane index as i64, byte pointer).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1f64.p0"
        )]
        fn _vst3_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8);
    }
    _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
    // One-element vectors: LANE 0 is the only valid index (compile-time check).
    static_assert!(LANE == 0);
    // Declaration of the LLVM ST3 lane intrinsic (lane index as i64, byte pointer).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v1i64.p0"
        )]
        fn _vst3_lane_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8);
    }
    _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
    static_assert!(LANE == 0);
    // poly64 vectors are bit-identical to int64 vectors, so reinterpret the
    // pointer and payload and reuse the signed implementation.
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
    static_assert!(LANE == 0);
    // uint64 vectors are bit-identical to int64 vectors, so reinterpret the
    // pointer and payload and reuse the signed implementation.
    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) {
    // Shared macro implementing the interleaving store (st3 semantics):
    // element type f64, 2 lanes per vector, 3 vectors.
    crate::core_arch::macros::interleaving_store!(f64, 2, 3, a, b)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) {
    // Shared macro implementing the interleaving store (st3 semantics):
    // element type i64, 2 lanes per vector, 3 vectors.
    crate::core_arch::macros::interleaving_store!(i64, 2, 3, a, b)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) {
    // 2-lane vector: lane index must fit in 1 bit (0 or 1), checked at
    // compile time.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2f64.p0"
        )]
        // Raw binding to the LLVM `st3lane` intrinsic for v2f64; the lane
        // index travels as i64 and the destination as an untyped byte pointer.
        fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8);
    }
    _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
    // 16-lane vector: lane index must fit in 4 bits (0..=15), checked at
    // compile time.
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v16i8.p0"
        )]
        // Raw binding to the LLVM `st3lane` intrinsic for v16i8; the lane
        // index travels as i64 and the destination as an untyped byte pointer.
        fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8);
    }
    _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
    // 2-lane vector: lane index must fit in 1 bit (0 or 1), checked at
    // compile time.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2i64.p0"
        )]
        // Raw binding to the LLVM `st3lane` intrinsic for v2i64; the lane
        // index travels as i64 and the destination as an untyped byte pointer.
        fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8);
    }
    _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1);
    // poly64 vectors are bit-identical to int64 vectors, so reinterpret the
    // pointer and payload and reuse the signed implementation.
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
    static_assert_uimm_bits!(LANE, 4);
    // uint8 vectors are bit-identical to int8 vectors, so reinterpret the
    // pointer and payload and reuse the signed implementation.
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1);
    // uint64 vectors are bit-identical to int64 vectors, so reinterpret the
    // pointer and payload and reuse the signed implementation.
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
    static_assert_uimm_bits!(LANE, 4);
    // poly8 vectors are bit-identical to int8 vectors, so reinterpret the
    // pointer and payload and reuse the signed implementation.
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) {
    // poly64 vectors are bit-identical to int64 vectors, so reinterpret the
    // pointer and payload and reuse the signed implementation.
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) {
    // uint64 vectors are bit-identical to int64 vectors, so reinterpret the
    // pointer and payload and reuse the signed implementation.
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) {
    // With a single lane per vector there is nothing to interleave: the four
    // f64 values are stored contiguously, so a plain (possibly unaligned)
    // struct write suffices — hence `assert_instr(nop)` rather than `st4`.
    core::ptr::write_unaligned(a.cast(), b)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
    // Single-lane vector, so the only valid lane index is 0 (checked at
    // compile time).
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1f64.p0"
        )]
        // Raw binding to the LLVM `st4lane` intrinsic for v1f64; the lane
        // index travels as i64 and the destination as an untyped byte pointer.
        fn _vst4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
    // Single-lane vector, so the only valid lane index is 0 (checked at
    // compile time).
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1i64.p0"
        )]
        // Raw binding to the LLVM `st4lane` intrinsic for v1i64; the lane
        // index travels as i64 and the destination as an untyped byte pointer.
        fn _vst4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
    static_assert!(LANE == 0);
    // poly64 vectors are bit-identical to int64 vectors, so reinterpret the
    // pointer and payload and reuse the signed implementation.
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
    static_assert!(LANE == 0);
    // uint64 vectors are bit-identical to int64 vectors, so reinterpret the
    // pointer and payload and reuse the signed implementation.
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) {
    // Shared macro implementing the interleaving store (st4 semantics):
    // element type f64, 2 lanes per vector, 4 vectors.
    crate::core_arch::macros::interleaving_store!(f64, 2, 4, a, b)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) {
    // Shared macro implementing the interleaving store (st4 semantics):
    // element type i64, 2 lanes per vector, 4 vectors.
    crate::core_arch::macros::interleaving_store!(i64, 2, 4, a, b)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
    // 2-lane vector: lane index must fit in 1 bit (0 or 1), checked at
    // compile time.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2f64.p0"
        )]
        // Raw binding to the LLVM `st4lane` intrinsic for v2f64; the lane
        // index travels as i64 and the destination as an untyped byte pointer.
        fn _vst4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
    // 16-lane vector: lane index must fit in 4 bits (0..=15), checked at
    // compile time.
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v16i8.p0"
        )]
        // Raw binding to the LLVM `st4lane` intrinsic for v16i8; the lane
        // index travels as i64 and the destination as an untyped byte pointer.
        fn _vst4q_lane_s8(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
    // 2-lane vector: lane index must fit in 1 bit (0 or 1), checked at
    // compile time.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2i64.p0"
        )]
        // Raw binding to the LLVM `st4lane` intrinsic for v2i64; the lane
        // index travels as i64 and the destination as an untyped byte pointer.
        fn _vst4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1);
    // poly64 vectors are bit-identical to int64 vectors, so reinterpret the
    // pointer and payload and reuse the signed implementation.
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4);
    // uint8 vectors are bit-identical to int8 vectors, so reinterpret the
    // pointer and payload and reuse the signed implementation.
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1);
    // uint64 vectors are bit-identical to int64 vectors, so reinterpret the
    // pointer and payload and reuse the signed implementation.
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4);
    // poly8 vectors are bit-identical to int8 vectors, so reinterpret the
    // pointer and payload and reuse the signed implementation.
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) {
    // poly64 vectors are bit-identical to int64 vectors, so reinterpret the
    // pointer and payload and reuse the signed implementation.
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) {
    // uint64 vectors are bit-identical to int64 vectors, so reinterpret the
    // pointer and payload and reuse the signed implementation.
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1_lane_f64<const LANE: i32>(ptr: *mut f64, val: float64x1_t) {
    static_assert!(LANE == 0);
    // f64 lanes are bit-identical to i64 lanes; reinterpret and delegate to
    // the signed store-release implementation.
    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1q_lane_f64<const LANE: i32>(ptr: *mut f64, val: float64x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    // f64 lanes are bit-identical to i64 lanes; reinterpret and delegate to
    // the signed store-release implementation.
    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1_lane_u64<const LANE: i32>(ptr: *mut u64, val: uint64x1_t) {
    static_assert!(LANE == 0);
    // u64 lanes are bit-identical to i64 lanes; reinterpret and delegate to
    // the signed store-release implementation.
    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1q_lane_u64<const LANE: i32>(ptr: *mut u64, val: uint64x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    // u64 lanes are bit-identical to i64 lanes; reinterpret and delegate to
    // the signed store-release implementation.
    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1_lane_p64<const LANE: i32>(ptr: *mut p64, val: poly64x1_t) {
    static_assert!(LANE == 0);
    // p64 lanes are bit-identical to i64 lanes; reinterpret and delegate to
    // the signed store-release implementation.
    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1q_lane_p64<const LANE: i32>(ptr: *mut p64, val: poly64x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    // p64 lanes are bit-identical to i64 lanes; reinterpret and delegate to
    // the signed store-release implementation.
    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1_lane_s64<const LANE: i32>(ptr: *mut i64, val: int64x1_t) {
    // Single-lane vector, so the only valid lane index is 0 (checked at
    // compile time).
    static_assert!(LANE == 0);
    // Treat the destination as an AtomicI64 so the store carries Release
    // ordering, matching the store-release semantics of STL1
    // (see `assert_instr(stl1)` above).
    let atomic_dst = ptr as *mut crate::sync::atomic::AtomicI64;
    unsafe {
        // Pull the selected lane out of the vector, then publish it.
        let lane: i64 = simd_extract!(val, LANE as u32);
        (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release)
    }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1q_lane_s64<const LANE: i32>(ptr: *mut i64, val: int64x2_t) {
    // 2-lane vector: lane index must fit in 1 bit (0 or 1), checked at
    // compile time.
    static_assert_uimm_bits!(LANE, 1);
    // Treat the destination as an AtomicI64 so the store carries Release
    // ordering, matching the store-release semantics of STL1
    // (see `assert_instr(stl1)` above).
    let atomic_dst = ptr as *mut crate::sync::atomic::AtomicI64;
    unsafe {
        // Pull the selected lane out of the vector, then publish it.
        let lane: i64 = simd_extract!(val, LANE as u32);
        (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release)
    }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Lane-wise subtraction via the portable SIMD intrinsic; lowers to FSUB.
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Lane-wise subtraction via the portable SIMD intrinsic; lowers to FSUB.
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sub))]
pub fn vsubd_s64(a: i64, b: i64) -> i64 {
    // Scalar subtraction with two's-complement wraparound, matching the
    // modular semantics of the SUB instruction (no overflow trap).
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sub))]
pub fn vsubd_u64(a: u64, b: u64) -> u64 {
    // Scalar subtraction with modular wraparound, matching the semantics of
    // the SUB instruction (no overflow trap).
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsubh_f16(a: f16, b: f16) -> f16 {
    // Plain scalar half-precision subtraction; lowers to FSUB with fp16
    // enabled.
    a - b
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Select the upper 8 lanes of each input, sign-extend them to i16,
        // then subtract lane-wise — the SSUBL2 operation.
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int16x8_t = simd_cast(c);
        let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int16x8_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Select the upper 4 lanes of each input, sign-extend them to i32,
        // then subtract lane-wise — the SSUBL2 operation.
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int32x4_t = simd_cast(c);
        let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: int32x4_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Select the upper 2 lanes of each input, sign-extend them to i64,
        // then subtract lane-wise — the SSUBL2 operation.
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int64x2_t = simd_cast(c);
        let e: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: int64x2_t = simd_cast(e);
        simd_sub(d, f)
    }
}
25617#[doc = "Unsigned Subtract Long"]
25618#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"]
25619#[inline(always)]
25620#[target_feature(enable = "neon")]
25621#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25622#[cfg_attr(test, assert_instr(usubl2))]
25623pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
25624    unsafe {
25625        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
25626        let d: uint16x8_t = simd_cast(c);
25627        let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
25628        let f: uint16x8_t = simd_cast(e);
25629        simd_sub(d, f)
25630    }
25631}
25632#[doc = "Unsigned Subtract Long"]
25633#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"]
25634#[inline(always)]
25635#[target_feature(enable = "neon")]
25636#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25637#[cfg_attr(test, assert_instr(usubl2))]
25638pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
25639    unsafe {
25640        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
25641        let d: uint32x4_t = simd_cast(c);
25642        let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
25643        let f: uint32x4_t = simd_cast(e);
25644        simd_sub(d, f)
25645    }
25646}
25647#[doc = "Unsigned Subtract Long"]
25648#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"]
25649#[inline(always)]
25650#[target_feature(enable = "neon")]
25651#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25652#[cfg_attr(test, assert_instr(usubl2))]
25653pub fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
25654    unsafe {
25655        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
25656        let d: uint64x2_t = simd_cast(c);
25657        let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
25658        let f: uint64x2_t = simd_cast(e);
25659        simd_sub(d, f)
25660    }
25661}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Widen the high half of `b` (lanes 8..=15, sign-extended by simd_cast
        // to 16 bits) and subtract it from the already-wide `a` (SSUBW2).
        let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // High half of `b` (lanes 4..=7) sign-extended to 32 bits, subtracted from `a`.
        let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // High half of `b` (lanes 2..=3) sign-extended to 64 bits, subtracted from `a`.
        let c: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Unsigned lane types make simd_cast zero-extend, matching USUBW2.
        let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // High half of `b` (lanes 4..=7) zero-extended to 32 bits, subtracted from `a`.
        let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // High half of `b` (lanes 2..=3) zero-extended to 64 bits, subtracted from `a`.
        let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Emulates the legacy 64-bit (8-byte table) lookup using the AArch64
    // 128-bit TBL form: the table is `a` widened with a zeroed upper half, so
    // indices 8..=15 read zeros. `b` holds the lane indices; it is transmuted
    // to the unsigned index type that vqtbl1_s8 expects.
    vqtbl1_s8(vcombine_s8(a, unsafe { crate::mem::zeroed() }), unsafe {
        {
            transmute(b)
        }
    })
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // Same zero-padded 128-bit table trick; `b` is already the unsigned index
    // type so no transmute is needed.
    vqtbl1_u8(vcombine_u8(a, unsafe { crate::mem::zeroed() }), b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t {
    // Polynomial variant of the same zero-padded 128-bit table lookup.
    vqtbl1_p8(vcombine_p8(a, unsafe { crate::mem::zeroed() }), b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t {
    // The two 8-byte table halves combine into one full 16-byte table, so a
    // single vqtbl1 lookup implements the legacy two-register form exactly.
    unsafe { vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    // Same combined-table lookup; transmutes shuttle between the unsigned
    // public types and the signed types the raw vqtbl1 helper uses.
    unsafe { transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    // Polynomial variant of the combined 16-byte table lookup.
    unsafe { transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t {
    // The 24-byte (three-register) table is padded with a zeroed fourth half
    // to form two 16-byte vectors; indices 24..=31 therefore read zeros,
    // and a single vqtbl2 lookup covers the whole table.
    let x = int8x16x2_t(
        vcombine_s8(a.0, a.1),
        vcombine_s8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    // Same zero-padded 32-byte table; `b` is already the unsigned index type.
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    // Polynomial variant of the zero-padded three-register table lookup.
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t {
    // Four 8-byte halves combine exactly into two 16-byte table vectors, so
    // vqtbl2 implements the legacy four-register lookup with no padding.
    let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    // Unsigned variant; `b` is already the index type vqtbl2 takes.
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    // Polynomial variant of the full 32-byte table lookup.
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
    // TBX semantics: lanes with an out-of-range index keep the value from `a`.
    // Since the table here is only 8 bytes but is padded to 16 for vqtbx1,
    // indices 8..=15 would hit the zeroed half instead of falling through, so
    // an explicit `c < 8` mask re-selects `a` for those lanes.
    unsafe {
        simd_select(
            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_s8(b, crate::mem::zeroed())),
                transmute(c),
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
    // Same `c < 8` guard as the signed variant; `c` needs no transmute here.
    unsafe {
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_u8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t {
    // Polynomial variant with the same `c < 8` fallback-to-`a` guard.
    unsafe {
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_p8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t {
    // The two table halves form a full 16-byte table, so every in-range index
    // (0..=15) is covered and vqtbx1 alone gives the TBX fallback behavior —
    // no extra select mask is needed (contrast with vtbx1/vtbx3).
    unsafe { vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    // Unsigned variant of the full-table vqtbx1 lookup.
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    // Polynomial variant of the full-table vqtbx1 lookup.
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t {
    // The 24-byte table is padded to 32 bytes with zeros. Indices 0..=23 hit
    // real table bytes; indices 24..=31 would read the zero padding, so a
    // `c < 24` mask restores TBX behavior by selecting `a` for those lanes.
    let x = int8x16x2_t(
        vcombine_s8(b.0, b.1),
        vcombine_s8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<int8x8_t, int8x8_t>(transmute(c), transmute(i8x8::splat(24))),
            transmute(vqtbx2(
                transmute(a),
                transmute(x.0),
                transmute(x.1),
                transmute(c),
            )),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    // Unsigned variant with the same zero-padded table and `c < 24` guard.
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    // Polynomial variant with the same zero-padded table and `c < 24` guard.
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t {
    // Four 8-byte halves fill two 16-byte table vectors exactly (indices
    // 0..=31 all map to real table bytes), so vqtbx2 alone provides the TBX
    // keep-destination fallback — no select mask needed.
    unsafe {
        vqtbx2(
            transmute(a),
            transmute(vcombine_s8(b.0, b.1)),
            transmute(vcombine_s8(b.2, b.3)),
            transmute(c),
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    // Unsigned variant of the full 32-byte extended lookup.
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    // Polynomial variant of the full 32-byte extended lookup.
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_p8(b.0, b.1)),
            transmute(vcombine_p8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Interleave the even-numbered lanes of `a` and `b`: [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // With only two lanes, TRN1 and ZIP1 coincide (hence assert_instr(zip1)):
    // the result is [a0, b0].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Two-lane case: [a0, b0] (TRN1 == ZIP1 here).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Two-lane case: [a0, b0] (TRN1 == ZIP1 here).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Two-lane case: [a0, b0] (TRN1 == ZIP1 here).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // Two-lane case: [a0, b0] (TRN1 == ZIP1 here).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Two-lane case: [a0, b0] (TRN1 == ZIP1 here).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Two-lane case: [a0, b0] (TRN1 == ZIP1 here).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Even lanes of `a` interleaved with even lanes of `b`: [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Even lanes of `a` interleaved with even lanes of `b`.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // Even lanes of `a` interleaved with even lanes of `b`: [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Even lanes of `a` interleaved with even lanes of `b`: [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // Even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Even lanes of `a` interleaved with even lanes of `b`.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // Even lanes of `a` interleaved with even lanes of `b`: [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Even lanes of `a` interleaved with even lanes of `b`: [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // Even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // Even lanes of `a` interleaved with even lanes of `b`.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // Even lanes of `a` interleaved with even lanes of `b`: [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // Even lanes of `a` interleaved with even lanes of `b`.
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
26291#[doc = "Transpose vectors"]
26292#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"]
26293#[inline(always)]
26294#[target_feature(enable = "neon,fp16")]
26295#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
26296#[cfg(not(target_arch = "arm64ec"))]
26297#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26298pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
26299    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
26300}
26301#[doc = "Transpose vectors"]
26302#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"]
26303#[inline(always)]
26304#[target_feature(enable = "neon,fp16")]
26305#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
26306#[cfg(not(target_arch = "arm64ec"))]
26307#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26308pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
26309    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
26310}
26311#[doc = "Transpose vectors"]
26312#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"]
26313#[inline(always)]
26314#[target_feature(enable = "neon")]
26315#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26316#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
26317pub fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
26318    unsafe { simd_shuffle!(a, b, [1, 3]) }
26319}
26320#[doc = "Transpose vectors"]
26321#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"]
26322#[inline(always)]
26323#[target_feature(enable = "neon")]
26324#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26325#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
26326pub fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
26327    unsafe { simd_shuffle!(a, b, [1, 3]) }
26328}
26329#[doc = "Transpose vectors"]
26330#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"]
26331#[inline(always)]
26332#[target_feature(enable = "neon")]
26333#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26334#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
26335pub fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
26336    unsafe { simd_shuffle!(a, b, [1, 3]) }
26337}
26338#[doc = "Transpose vectors"]
26339#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"]
26340#[inline(always)]
26341#[target_feature(enable = "neon")]
26342#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26343#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
26344pub fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
26345    unsafe { simd_shuffle!(a, b, [1, 3]) }
26346}
26347#[doc = "Transpose vectors"]
26348#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"]
26349#[inline(always)]
26350#[target_feature(enable = "neon")]
26351#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26352#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
26353pub fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
26354    unsafe { simd_shuffle!(a, b, [1, 3]) }
26355}
26356#[doc = "Transpose vectors"]
26357#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"]
26358#[inline(always)]
26359#[target_feature(enable = "neon")]
26360#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26361#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
26362pub fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
26363    unsafe { simd_shuffle!(a, b, [1, 3]) }
26364}
26365#[doc = "Transpose vectors"]
26366#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"]
26367#[inline(always)]
26368#[target_feature(enable = "neon")]
26369#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26370#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
26371pub fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
26372    unsafe { simd_shuffle!(a, b, [1, 3]) }
26373}
26374#[doc = "Transpose vectors"]
26375#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"]
26376#[inline(always)]
26377#[target_feature(enable = "neon")]
26378#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26379#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26380pub fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
26381    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
26382}
26383#[doc = "Transpose vectors"]
26384#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"]
26385#[inline(always)]
26386#[target_feature(enable = "neon")]
26387#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26388#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26389pub fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
26390    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
26391}
26392#[doc = "Transpose vectors"]
26393#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"]
26394#[inline(always)]
26395#[target_feature(enable = "neon")]
26396#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26397#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26398pub fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
26399    unsafe {
26400        simd_shuffle!(
26401            a,
26402            b,
26403            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
26404        )
26405    }
26406}
26407#[doc = "Transpose vectors"]
26408#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"]
26409#[inline(always)]
26410#[target_feature(enable = "neon")]
26411#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26412#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26413pub fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
26414    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
26415}
26416#[doc = "Transpose vectors"]
26417#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"]
26418#[inline(always)]
26419#[target_feature(enable = "neon")]
26420#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26421#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26422pub fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
26423    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
26424}
26425#[doc = "Transpose vectors"]
26426#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"]
26427#[inline(always)]
26428#[target_feature(enable = "neon")]
26429#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26430#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26431pub fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
26432    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
26433}
26434#[doc = "Transpose vectors"]
26435#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"]
26436#[inline(always)]
26437#[target_feature(enable = "neon")]
26438#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26439#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26440pub fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
26441    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
26442}
26443#[doc = "Transpose vectors"]
26444#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"]
26445#[inline(always)]
26446#[target_feature(enable = "neon")]
26447#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26448#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26449pub fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
26450    unsafe {
26451        simd_shuffle!(
26452            a,
26453            b,
26454            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
26455        )
26456    }
26457}
26458#[doc = "Transpose vectors"]
26459#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"]
26460#[inline(always)]
26461#[target_feature(enable = "neon")]
26462#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26463#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26464pub fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
26465    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
26466}
26467#[doc = "Transpose vectors"]
26468#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"]
26469#[inline(always)]
26470#[target_feature(enable = "neon")]
26471#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26472#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26473pub fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
26474    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
26475}
26476#[doc = "Transpose vectors"]
26477#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"]
26478#[inline(always)]
26479#[target_feature(enable = "neon")]
26480#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26481#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26482pub fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
26483    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
26484}
26485#[doc = "Transpose vectors"]
26486#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"]
26487#[inline(always)]
26488#[target_feature(enable = "neon")]
26489#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26490#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26491pub fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
26492    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
26493}
26494#[doc = "Transpose vectors"]
26495#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"]
26496#[inline(always)]
26497#[target_feature(enable = "neon")]
26498#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26499#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26500pub fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
26501    unsafe {
26502        simd_shuffle!(
26503            a,
26504            b,
26505            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
26506        )
26507    }
26508}
26509#[doc = "Transpose vectors"]
26510#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"]
26511#[inline(always)]
26512#[target_feature(enable = "neon")]
26513#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26514#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26515pub fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
26516    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
26517}
26518#[doc = "Transpose vectors"]
26519#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"]
26520#[inline(always)]
26521#[target_feature(enable = "neon")]
26522#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26523#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
26524pub fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
26525    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
26526}
26527#[doc = "Signed compare bitwise Test bits nonzero"]
26528#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"]
26529#[inline(always)]
26530#[target_feature(enable = "neon")]
26531#[cfg_attr(test, assert_instr(cmtst))]
26532#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26533pub fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
26534    unsafe {
26535        let c: int64x1_t = simd_and(a, b);
26536        let d: i64x1 = i64x1::new(0);
26537        simd_ne(c, transmute(d))
26538    }
26539}
26540#[doc = "Signed compare bitwise Test bits nonzero"]
26541#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"]
26542#[inline(always)]
26543#[target_feature(enable = "neon")]
26544#[cfg_attr(test, assert_instr(cmtst))]
26545#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26546pub fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
26547    unsafe {
26548        let c: int64x2_t = simd_and(a, b);
26549        let d: i64x2 = i64x2::new(0, 0);
26550        simd_ne(c, transmute(d))
26551    }
26552}
26553#[doc = "Signed compare bitwise Test bits nonzero"]
26554#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"]
26555#[inline(always)]
26556#[target_feature(enable = "neon")]
26557#[cfg_attr(test, assert_instr(cmtst))]
26558#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26559pub fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
26560    unsafe {
26561        let c: poly64x1_t = simd_and(a, b);
26562        let d: i64x1 = i64x1::new(0);
26563        simd_ne(c, transmute(d))
26564    }
26565}
26566#[doc = "Signed compare bitwise Test bits nonzero"]
26567#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"]
26568#[inline(always)]
26569#[target_feature(enable = "neon")]
26570#[cfg_attr(test, assert_instr(cmtst))]
26571#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26572pub fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
26573    unsafe {
26574        let c: poly64x2_t = simd_and(a, b);
26575        let d: i64x2 = i64x2::new(0, 0);
26576        simd_ne(c, transmute(d))
26577    }
26578}
26579#[doc = "Unsigned compare bitwise Test bits nonzero"]
26580#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"]
26581#[inline(always)]
26582#[target_feature(enable = "neon")]
26583#[cfg_attr(test, assert_instr(cmtst))]
26584#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26585pub fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
26586    unsafe {
26587        let c: uint64x1_t = simd_and(a, b);
26588        let d: u64x1 = u64x1::new(0);
26589        simd_ne(c, transmute(d))
26590    }
26591}
26592#[doc = "Unsigned compare bitwise Test bits nonzero"]
26593#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"]
26594#[inline(always)]
26595#[target_feature(enable = "neon")]
26596#[cfg_attr(test, assert_instr(cmtst))]
26597#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26598pub fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
26599    unsafe {
26600        let c: uint64x2_t = simd_and(a, b);
26601        let d: u64x2 = u64x2::new(0, 0);
26602        simd_ne(c, transmute(d))
26603    }
26604}
26605#[doc = "Compare bitwise test bits nonzero"]
26606#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"]
26607#[inline(always)]
26608#[target_feature(enable = "neon")]
26609#[cfg_attr(test, assert_instr(tst))]
26610#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26611pub fn vtstd_s64(a: i64, b: i64) -> u64 {
26612    unsafe { transmute(vtst_s64(transmute(a), transmute(b))) }
26613}
26614#[doc = "Compare bitwise test bits nonzero"]
26615#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"]
26616#[inline(always)]
26617#[target_feature(enable = "neon")]
26618#[cfg_attr(test, assert_instr(tst))]
26619#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26620pub fn vtstd_u64(a: u64, b: u64) -> u64 {
26621    unsafe { transmute(vtst_u64(transmute(a), transmute(b))) }
26622}
26623#[doc = "Signed saturating Accumulate of Unsigned value."]
26624#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"]
26625#[inline(always)]
26626#[target_feature(enable = "neon")]
26627#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26628#[cfg_attr(test, assert_instr(suqadd))]
26629pub fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t {
26630    unsafe extern "unadjusted" {
26631        #[cfg_attr(
26632            any(target_arch = "aarch64", target_arch = "arm64ec"),
26633            link_name = "llvm.aarch64.neon.suqadd.v8i8"
26634        )]
26635        fn _vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t;
26636    }
26637    unsafe { _vuqadd_s8(a, b) }
26638}
26639#[doc = "Signed saturating Accumulate of Unsigned value."]
26640#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"]
26641#[inline(always)]
26642#[target_feature(enable = "neon")]
26643#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26644#[cfg_attr(test, assert_instr(suqadd))]
26645pub fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
26646    unsafe extern "unadjusted" {
26647        #[cfg_attr(
26648            any(target_arch = "aarch64", target_arch = "arm64ec"),
26649            link_name = "llvm.aarch64.neon.suqadd.v16i8"
26650        )]
26651        fn _vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
26652    }
26653    unsafe { _vuqaddq_s8(a, b) }
26654}
26655#[doc = "Signed saturating Accumulate of Unsigned value."]
26656#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"]
26657#[inline(always)]
26658#[target_feature(enable = "neon")]
26659#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26660#[cfg_attr(test, assert_instr(suqadd))]
26661pub fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t {
26662    unsafe extern "unadjusted" {
26663        #[cfg_attr(
26664            any(target_arch = "aarch64", target_arch = "arm64ec"),
26665            link_name = "llvm.aarch64.neon.suqadd.v4i16"
26666        )]
26667        fn _vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t;
26668    }
26669    unsafe { _vuqadd_s16(a, b) }
26670}
26671#[doc = "Signed saturating Accumulate of Unsigned value."]
26672#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"]
26673#[inline(always)]
26674#[target_feature(enable = "neon")]
26675#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26676#[cfg_attr(test, assert_instr(suqadd))]
26677pub fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t {
26678    unsafe extern "unadjusted" {
26679        #[cfg_attr(
26680            any(target_arch = "aarch64", target_arch = "arm64ec"),
26681            link_name = "llvm.aarch64.neon.suqadd.v8i16"
26682        )]
26683        fn _vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t;
26684    }
26685    unsafe { _vuqaddq_s16(a, b) }
26686}
26687#[doc = "Signed saturating Accumulate of Unsigned value."]
26688#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"]
26689#[inline(always)]
26690#[target_feature(enable = "neon")]
26691#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26692#[cfg_attr(test, assert_instr(suqadd))]
26693pub fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t {
26694    unsafe extern "unadjusted" {
26695        #[cfg_attr(
26696            any(target_arch = "aarch64", target_arch = "arm64ec"),
26697            link_name = "llvm.aarch64.neon.suqadd.v2i32"
26698        )]
26699        fn _vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t;
26700    }
26701    unsafe { _vuqadd_s32(a, b) }
26702}
26703#[doc = "Signed saturating Accumulate of Unsigned value."]
26704#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"]
26705#[inline(always)]
26706#[target_feature(enable = "neon")]
26707#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26708#[cfg_attr(test, assert_instr(suqadd))]
26709pub fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t {
26710    unsafe extern "unadjusted" {
26711        #[cfg_attr(
26712            any(target_arch = "aarch64", target_arch = "arm64ec"),
26713            link_name = "llvm.aarch64.neon.suqadd.v4i32"
26714        )]
26715        fn _vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t;
26716    }
26717    unsafe { _vuqaddq_s32(a, b) }
26718}
26719#[doc = "Signed saturating Accumulate of Unsigned value."]
26720#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"]
26721#[inline(always)]
26722#[target_feature(enable = "neon")]
26723#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26724#[cfg_attr(test, assert_instr(suqadd))]
26725pub fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t {
26726    unsafe extern "unadjusted" {
26727        #[cfg_attr(
26728            any(target_arch = "aarch64", target_arch = "arm64ec"),
26729            link_name = "llvm.aarch64.neon.suqadd.v1i64"
26730        )]
26731        fn _vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t;
26732    }
26733    unsafe { _vuqadd_s64(a, b) }
26734}
26735#[doc = "Signed saturating Accumulate of Unsigned value."]
26736#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"]
26737#[inline(always)]
26738#[target_feature(enable = "neon")]
26739#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26740#[cfg_attr(test, assert_instr(suqadd))]
26741pub fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t {
26742    unsafe extern "unadjusted" {
26743        #[cfg_attr(
26744            any(target_arch = "aarch64", target_arch = "arm64ec"),
26745            link_name = "llvm.aarch64.neon.suqadd.v2i64"
26746        )]
26747        fn _vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t;
26748    }
26749    unsafe { _vuqaddq_s64(a, b) }
26750}
26751#[doc = "Signed saturating accumulate of unsigned value"]
26752#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"]
26753#[inline(always)]
26754#[target_feature(enable = "neon")]
26755#[cfg_attr(test, assert_instr(suqadd))]
26756#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26757pub fn vuqaddb_s8(a: i8, b: u8) -> i8 {
26758    unsafe { simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) }
26759}
26760#[doc = "Signed saturating accumulate of unsigned value"]
26761#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"]
26762#[inline(always)]
26763#[target_feature(enable = "neon")]
26764#[cfg_attr(test, assert_instr(suqadd))]
26765#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26766pub fn vuqaddh_s16(a: i16, b: u16) -> i16 {
26767    unsafe { simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) }
26768}
26769#[doc = "Signed saturating accumulate of unsigned value"]
26770#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"]
26771#[inline(always)]
26772#[target_feature(enable = "neon")]
26773#[cfg_attr(test, assert_instr(suqadd))]
26774#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26775pub fn vuqaddd_s64(a: i64, b: u64) -> i64 {
26776    unsafe extern "unadjusted" {
26777        #[cfg_attr(
26778            any(target_arch = "aarch64", target_arch = "arm64ec"),
26779            link_name = "llvm.aarch64.neon.suqadd.i64"
26780        )]
26781        fn _vuqaddd_s64(a: i64, b: u64) -> i64;
26782    }
26783    unsafe { _vuqaddd_s64(a, b) }
26784}
26785#[doc = "Signed saturating accumulate of unsigned value"]
26786#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"]
26787#[inline(always)]
26788#[target_feature(enable = "neon")]
26789#[cfg_attr(test, assert_instr(suqadd))]
26790#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26791pub fn vuqadds_s32(a: i32, b: u32) -> i32 {
26792    unsafe extern "unadjusted" {
26793        #[cfg_attr(
26794            any(target_arch = "aarch64", target_arch = "arm64ec"),
26795            link_name = "llvm.aarch64.neon.suqadd.i32"
26796        )]
26797        fn _vuqadds_s32(a: i32, b: u32) -> i32;
26798    }
26799    unsafe { _vuqadds_s32(a, b) }
26800}
26801#[doc = "Unzip vectors"]
26802#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"]
26803#[inline(always)]
26804#[target_feature(enable = "neon,fp16")]
26805#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
26806#[cfg(not(target_arch = "arm64ec"))]
26807#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
26808pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
26809    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
26810}
26811#[doc = "Unzip vectors"]
26812#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"]
26813#[inline(always)]
26814#[target_feature(enable = "neon,fp16")]
26815#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
26816#[cfg(not(target_arch = "arm64ec"))]
26817#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
26818pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
26819    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
26820}
26821#[doc = "Unzip vectors"]
26822#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"]
26823#[inline(always)]
26824#[target_feature(enable = "neon")]
26825#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26826#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
26827pub fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
26828    unsafe { simd_shuffle!(a, b, [0, 2]) }
26829}
26830#[doc = "Unzip vectors"]
26831#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"]
26832#[inline(always)]
26833#[target_feature(enable = "neon")]
26834#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26835#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
26836pub fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
26837    unsafe { simd_shuffle!(a, b, [0, 2]) }
26838}
26839#[doc = "Unzip vectors"]
26840#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"]
26841#[inline(always)]
26842#[target_feature(enable = "neon")]
26843#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26844#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
26845pub fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
26846    unsafe { simd_shuffle!(a, b, [0, 2]) }
26847}
26848#[doc = "Unzip vectors"]
26849#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"]
26850#[inline(always)]
26851#[target_feature(enable = "neon")]
26852#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26853#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
26854pub fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
26855    unsafe { simd_shuffle!(a, b, [0, 2]) }
26856}
26857#[doc = "Unzip vectors"]
26858#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"]
26859#[inline(always)]
26860#[target_feature(enable = "neon")]
26861#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26862#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
26863pub fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
26864    unsafe { simd_shuffle!(a, b, [0, 2]) }
26865}
26866#[doc = "Unzip vectors"]
26867#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"]
26868#[inline(always)]
26869#[target_feature(enable = "neon")]
26870#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26871#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
26872pub fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
26873    unsafe { simd_shuffle!(a, b, [0, 2]) }
26874}
26875#[doc = "Unzip vectors"]
26876#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"]
26877#[inline(always)]
26878#[target_feature(enable = "neon")]
26879#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26880#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
26881pub fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
26882    unsafe { simd_shuffle!(a, b, [0, 2]) }
26883}
26884#[doc = "Unzip vectors"]
26885#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"]
26886#[inline(always)]
26887#[target_feature(enable = "neon")]
26888#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26889#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
26890pub fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
26891    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
26892}
26893#[doc = "Unzip vectors"]
26894#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"]
26895#[inline(always)]
26896#[target_feature(enable = "neon")]
26897#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26898#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
26899pub fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
26900    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
26901}
26902#[doc = "Unzip vectors"]
26903#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"]
26904#[inline(always)]
26905#[target_feature(enable = "neon")]
26906#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26907#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
26908pub fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
26909    unsafe {
26910        simd_shuffle!(
26911            a,
26912            b,
26913            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
26914        )
26915    }
26916}
26917#[doc = "Unzip vectors"]
26918#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"]
26919#[inline(always)]
26920#[target_feature(enable = "neon")]
26921#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26922#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
26923pub fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
26924    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
26925}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // UZP1: even-numbered lanes of the concatenation a:b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // UZP1: even-numbered lanes of the concatenation a:b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // UZP1: even-numbered lanes of the concatenation a:b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // UZP1: even-numbered lanes of the concatenation a:b (indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // UZP1: even-numbered lanes of the concatenation a:b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // UZP1: even-numbered lanes of the concatenation a:b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // UZP1: even-numbered lanes of the concatenation a:b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // UZP1: even-numbered lanes of the concatenation a:b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // UZP1: even-numbered lanes of the concatenation a:b (indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // UZP1: even-numbered lanes of the concatenation a:b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // UZP1: even-numbered lanes of the concatenation a:b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // UZP2 on 2-lane vectors is [a[1], b[1]], identical to ZIP2 — hence the zip2 codegen check.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // UZP2 on 2-lane vectors is [a[1], b[1]], identical to ZIP2 — hence the zip2 codegen check.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // UZP2 on 2-lane vectors is [a[1], b[1]], identical to ZIP2 — hence the zip2 codegen check.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // UZP2 on 2-lane vectors is [a[1], b[1]], identical to ZIP2 — hence the zip2 codegen check.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // UZP2 on 2-lane vectors is [a[1], b[1]], identical to ZIP2 — hence the zip2 codegen check.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // UZP2 on 2-lane vectors is [a[1], b[1]], identical to ZIP2 — hence the zip2 codegen check.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // UZP2 on 2-lane vectors is [a[1], b[1]], identical to ZIP2 — hence the zip2 codegen check.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // UZP2: odd-numbered lanes of the concatenation a:b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Exclusive OR and rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(xar, IMM6 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vxarq_u64<const IMM6: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // The rotate amount must fit the XAR instruction's 6-bit immediate (0..=63).
    static_assert_uimm_bits!(IMM6, 6);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.xar"
        )]
        fn _vxarq_u64(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t;
    }
    // Forward to the LLVM SHA3 XAR intrinsic (XOR the inputs, then rotate each
    // 64-bit lane by IMM6 — rotation direction per the Arm XAR spec).
    unsafe { _vxarq_u64(a, b, IMM6 as i64) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // ZIP1: interleave the low halves of a and b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // ZIP1: interleave the low halves of a and b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // ZIP1 on 2-lane vectors: [a[0], b[0]].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // ZIP1: interleave the low halves of a and b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // ZIP1 on 2-lane vectors: [a[0], b[0]].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // ZIP1: interleave the low halves of a and b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // ZIP1: interleave the low halves of a and b (indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // ZIP1: interleave the low halves of a and b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // ZIP1: interleave the low halves of a and b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // ZIP1 on 2-lane vectors: [a[0], b[0]].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // ZIP1: interleave the low halves of a and b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // ZIP1 on 2-lane vectors: [a[0], b[0]].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // ZIP1: interleave the low halves of a and b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // ZIP1: interleave the low halves of a and b (indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // ZIP1: interleave the low halves of a and b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // ZIP1: interleave the low halves of a and b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // ZIP1 on 2-lane vectors: [a[0], b[0]].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // ZIP1: interleave the low halves of a and b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // ZIP1 on 2-lane vectors: [a[0], b[0]].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // ZIP1: interleave the low halves of a and b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // ZIP1: interleave the low halves of a and b (indices >= 16 select from `b`).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // ZIP1: interleave the low halves of a and b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // ZIP1: interleave the low halves of a and b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // ZIP1 on 2-lane vectors: [a[0], b[0]].
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // ZIP2: interleave the high halves of a and b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // ZIP2: interleave the high halves of a and b (indices >= 8 select from `b`).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // ZIP2 on 2-lane vectors: [a[1], b[1]].
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // ZIP2: interleave the high halves of a and b (indices >= 4 select from `b`).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // ZIP2 on a 2-lane vector: take the upper lane of each input —
    // [a[1], b[1]]; index 3 is b[1] in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // ZIP2: interleave the upper halves — [a[4], b[4], ..., a[7], b[7]];
    // indices 8..16 select `b`'s lanes in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // ZIP2: interleave the upper halves — [a[8], b[8], ..., a[15], b[15]];
    // indices 16..32 select `b`'s lanes in the concatenated (a, b) input.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // ZIP2: interleave the upper halves — [a[2], b[2], a[3], b[3]];
    // indices 4..8 select `b`'s lanes in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // ZIP2: interleave the upper halves — [a[4], b[4], ..., a[7], b[7]];
    // indices 8..16 select `b`'s lanes in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // ZIP2 on a 2-lane vector: take the upper lane of each input —
    // [a[1], b[1]]; index 3 is b[1] in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // ZIP2: interleave the upper halves — [a[2], b[2], a[3], b[3]];
    // indices 4..8 select `b`'s lanes in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // ZIP2 on a 2-lane vector: take the upper lane of each input —
    // [a[1], b[1]]; index 3 is b[1] in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // ZIP2: interleave the upper halves — [a[4], b[4], ..., a[7], b[7]];
    // indices 8..16 select `b`'s lanes in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // ZIP2: interleave the upper halves — [a[8], b[8], ..., a[15], b[15]];
    // indices 16..32 select `b`'s lanes in the concatenated (a, b) input.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // ZIP2: interleave the upper halves — [a[2], b[2], a[3], b[3]];
    // indices 4..8 select `b`'s lanes in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // ZIP2: interleave the upper halves — [a[4], b[4], ..., a[7], b[7]];
    // indices 8..16 select `b`'s lanes in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // ZIP2 on a 2-lane vector: take the upper lane of each input —
    // [a[1], b[1]]; index 3 is b[1] in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // ZIP2: interleave the upper halves — [a[2], b[2], a[3], b[3]];
    // indices 4..8 select `b`'s lanes in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // ZIP2 on a 2-lane vector: take the upper lane of each input —
    // [a[1], b[1]]; index 3 is b[1] in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // ZIP2: interleave the upper halves — [a[4], b[4], ..., a[7], b[7]];
    // indices 8..16 select `b`'s lanes in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // ZIP2: interleave the upper halves — [a[8], b[8], ..., a[15], b[15]];
    // indices 16..32 select `b`'s lanes in the concatenated (a, b) input.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // ZIP2: interleave the upper halves — [a[2], b[2], a[3], b[3]];
    // indices 4..8 select `b`'s lanes in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // ZIP2: interleave the upper halves — [a[4], b[4], ..., a[7], b[7]];
    // indices 8..16 select `b`'s lanes in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // ZIP2 on a 2-lane vector: take the upper lane of each input —
    // [a[1], b[1]]; index 3 is b[1] in the concatenated (a, b) input.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}