1#![allow(improper_ctypes)]
9
10#[cfg(test)]
11use stdarch_test::assert_instr;
12
13use super::*;
14
#[doc = "Absolute difference and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saba))]
pub fn svaba_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv16i8")]
        fn _svaba_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature, and the
    // `#[target_feature]` gate guarantees SVE2 is enabled in callers.
    unsafe { _svaba_s8(op1, op2, op3) }
}
28#[doc = "Absolute difference and accumulate"]
29#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s8])"]
30#[inline(always)]
31#[target_feature(enable = "sve,sve2")]
32#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
33#[cfg_attr(test, assert_instr(saba))]
34pub fn svaba_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
35 svaba_s8(op1, op2, svdup_n_s8(op3))
36}
#[doc = "Absolute difference and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saba))]
pub fn svaba_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv8i16")]
        fn _svaba_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature, and the
    // `#[target_feature]` gate guarantees SVE2 is enabled in callers.
    unsafe { _svaba_s16(op1, op2, op3) }
}
50#[doc = "Absolute difference and accumulate"]
51#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s16])"]
52#[inline(always)]
53#[target_feature(enable = "sve,sve2")]
54#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
55#[cfg_attr(test, assert_instr(saba))]
56pub fn svaba_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
57 svaba_s16(op1, op2, svdup_n_s16(op3))
58}
#[doc = "Absolute difference and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saba))]
pub fn svaba_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv4i32")]
        fn _svaba_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature, and the
    // `#[target_feature]` gate guarantees SVE2 is enabled in callers.
    unsafe { _svaba_s32(op1, op2, op3) }
}
72#[doc = "Absolute difference and accumulate"]
73#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s32])"]
74#[inline(always)]
75#[target_feature(enable = "sve,sve2")]
76#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
77#[cfg_attr(test, assert_instr(saba))]
78pub fn svaba_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
79 svaba_s32(op1, op2, svdup_n_s32(op3))
80}
#[doc = "Absolute difference and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saba))]
pub fn svaba_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv2i64")]
        fn _svaba_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature, and the
    // `#[target_feature]` gate guarantees SVE2 is enabled in callers.
    unsafe { _svaba_s64(op1, op2, op3) }
}
94#[doc = "Absolute difference and accumulate"]
95#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s64])"]
96#[inline(always)]
97#[target_feature(enable = "sve,sve2")]
98#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
99#[cfg_attr(test, assert_instr(saba))]
100pub fn svaba_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
101 svaba_s64(op1, op2, svdup_n_s64(op3))
102}
#[doc = "Absolute difference and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaba))]
pub fn svaba_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv16i8")]
        fn _svaba_u8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature;
    // `as_signed`/`as_unsigned` only switch between the signed and unsigned
    // views of the same lanes to fit it, and the `#[target_feature]` gate
    // guarantees SVE2 is enabled in callers.
    unsafe { _svaba_u8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
116#[doc = "Absolute difference and accumulate"]
117#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u8])"]
118#[inline(always)]
119#[target_feature(enable = "sve,sve2")]
120#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
121#[cfg_attr(test, assert_instr(uaba))]
122pub fn svaba_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
123 svaba_u8(op1, op2, svdup_n_u8(op3))
124}
#[doc = "Absolute difference and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaba))]
pub fn svaba_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv8i16")]
        fn _svaba_u16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature;
    // `as_signed`/`as_unsigned` only switch between the signed and unsigned
    // views of the same lanes to fit it, and the `#[target_feature]` gate
    // guarantees SVE2 is enabled in callers.
    unsafe { _svaba_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
138#[doc = "Absolute difference and accumulate"]
139#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u16])"]
140#[inline(always)]
141#[target_feature(enable = "sve,sve2")]
142#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
143#[cfg_attr(test, assert_instr(uaba))]
144pub fn svaba_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
145 svaba_u16(op1, op2, svdup_n_u16(op3))
146}
#[doc = "Absolute difference and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaba))]
pub fn svaba_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv4i32")]
        fn _svaba_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature;
    // `as_signed`/`as_unsigned` only switch between the signed and unsigned
    // views of the same lanes to fit it, and the `#[target_feature]` gate
    // guarantees SVE2 is enabled in callers.
    unsafe { _svaba_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
160#[doc = "Absolute difference and accumulate"]
161#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u32])"]
162#[inline(always)]
163#[target_feature(enable = "sve,sve2")]
164#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
165#[cfg_attr(test, assert_instr(uaba))]
166pub fn svaba_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
167 svaba_u32(op1, op2, svdup_n_u32(op3))
168}
#[doc = "Absolute difference and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaba))]
pub fn svaba_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv2i64")]
        fn _svaba_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature;
    // `as_signed`/`as_unsigned` only switch between the signed and unsigned
    // views of the same lanes to fit it, and the `#[target_feature]` gate
    // guarantees SVE2 is enabled in callers.
    unsafe { _svaba_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
182#[doc = "Absolute difference and accumulate"]
183#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u64])"]
184#[inline(always)]
185#[target_feature(enable = "sve,sve2")]
186#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
187#[cfg_attr(test, assert_instr(uaba))]
188pub fn svaba_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
189 svaba_u64(op1, op2, svdup_n_u64(op3))
190}
#[doc = "Absolute difference and accumulate long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sabalb))]
pub fn svabalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv8i16")]
        fn _svabalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature, and the
    // `#[target_feature]` gate guarantees SVE2 is enabled in callers.
    unsafe { _svabalb_s16(op1, op2, op3) }
}
204#[doc = "Absolute difference long (bottom)"]
205#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s16])"]
206#[inline(always)]
207#[target_feature(enable = "sve,sve2")]
208#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
209#[cfg_attr(test, assert_instr(sabalb))]
210pub fn svabalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
211 svabalb_s16(op1, op2, svdup_n_s8(op3))
212}
#[doc = "Absolute difference and accumulate long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sabalb))]
pub fn svabalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv4i32")]
        fn _svabalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature, and the
    // `#[target_feature]` gate guarantees SVE2 is enabled in callers.
    unsafe { _svabalb_s32(op1, op2, op3) }
}
226#[doc = "Absolute difference long (bottom)"]
227#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s32])"]
228#[inline(always)]
229#[target_feature(enable = "sve,sve2")]
230#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
231#[cfg_attr(test, assert_instr(sabalb))]
232pub fn svabalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
233 svabalb_s32(op1, op2, svdup_n_s16(op3))
234}
#[doc = "Absolute difference and accumulate long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sabalb))]
pub fn svabalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv2i64")]
        fn _svabalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature, and the
    // `#[target_feature]` gate guarantees SVE2 is enabled in callers.
    unsafe { _svabalb_s64(op1, op2, op3) }
}
248#[doc = "Absolute difference long (bottom)"]
249#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s64])"]
250#[inline(always)]
251#[target_feature(enable = "sve,sve2")]
252#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
253#[cfg_attr(test, assert_instr(sabalb))]
254pub fn svabalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
255 svabalb_s64(op1, op2, svdup_n_s32(op3))
256}
#[doc = "Absolute difference and accumulate long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uabalb))]
pub fn svabalb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv8i16")]
        fn _svabalb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature;
    // `as_signed`/`as_unsigned` only switch between the signed and unsigned
    // views of the same lanes to fit it, and the `#[target_feature]` gate
    // guarantees SVE2 is enabled in callers.
    unsafe { _svabalb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
270#[doc = "Absolute difference long (bottom)"]
271#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u16])"]
272#[inline(always)]
273#[target_feature(enable = "sve,sve2")]
274#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
275#[cfg_attr(test, assert_instr(uabalb))]
276pub fn svabalb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t {
277 svabalb_u16(op1, op2, svdup_n_u8(op3))
278}
#[doc = "Absolute difference and accumulate long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uabalb))]
pub fn svabalb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv4i32")]
        fn _svabalb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature;
    // `as_signed`/`as_unsigned` only switch between the signed and unsigned
    // views of the same lanes to fit it, and the `#[target_feature]` gate
    // guarantees SVE2 is enabled in callers.
    unsafe { _svabalb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
292#[doc = "Absolute difference long (bottom)"]
293#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u32])"]
294#[inline(always)]
295#[target_feature(enable = "sve,sve2")]
296#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
297#[cfg_attr(test, assert_instr(uabalb))]
298pub fn svabalb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t {
299 svabalb_u32(op1, op2, svdup_n_u16(op3))
300}
#[doc = "Absolute difference and accumulate long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uabalb))]
pub fn svabalb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv2i64")]
        fn _svabalb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature;
    // `as_signed`/`as_unsigned` only switch between the signed and unsigned
    // views of the same lanes to fit it, and the `#[target_feature]` gate
    // guarantees SVE2 is enabled in callers.
    unsafe { _svabalb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
314#[doc = "Absolute difference long (bottom)"]
315#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u64])"]
316#[inline(always)]
317#[target_feature(enable = "sve,sve2")]
318#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
319#[cfg_attr(test, assert_instr(uabalb))]
320pub fn svabalb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t {
321 svabalb_u64(op1, op2, svdup_n_u32(op3))
322}
#[doc = "Absolute difference and accumulate long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sabalt))]
pub fn svabalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv8i16")]
        fn _svabalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature, and the
    // `#[target_feature]` gate guarantees SVE2 is enabled in callers.
    unsafe { _svabalt_s16(op1, op2, op3) }
}
336#[doc = "Absolute difference long (top)"]
337#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s16])"]
338#[inline(always)]
339#[target_feature(enable = "sve,sve2")]
340#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
341#[cfg_attr(test, assert_instr(sabalt))]
342pub fn svabalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
343 svabalt_s16(op1, op2, svdup_n_s8(op3))
344}
#[doc = "Absolute difference and accumulate long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sabalt))]
pub fn svabalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv4i32")]
        fn _svabalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature, and the
    // `#[target_feature]` gate guarantees SVE2 is enabled in callers.
    unsafe { _svabalt_s32(op1, op2, op3) }
}
358#[doc = "Absolute difference long (top)"]
359#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s32])"]
360#[inline(always)]
361#[target_feature(enable = "sve,sve2")]
362#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
363#[cfg_attr(test, assert_instr(sabalt))]
364pub fn svabalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
365 svabalt_s32(op1, op2, svdup_n_s16(op3))
366}
#[doc = "Absolute difference and accumulate long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sabalt))]
pub fn svabalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv2i64")]
        fn _svabalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature, and the
    // `#[target_feature]` gate guarantees SVE2 is enabled in callers.
    unsafe { _svabalt_s64(op1, op2, op3) }
}
380#[doc = "Absolute difference long (top)"]
381#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s64])"]
382#[inline(always)]
383#[target_feature(enable = "sve,sve2")]
384#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
385#[cfg_attr(test, assert_instr(sabalt))]
386pub fn svabalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
387 svabalt_s64(op1, op2, svdup_n_s32(op3))
388}
#[doc = "Absolute difference and accumulate long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uabalt))]
pub fn svabalt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv8i16")]
        fn _svabalt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature;
    // `as_signed`/`as_unsigned` only switch between the signed and unsigned
    // views of the same lanes to fit it, and the `#[target_feature]` gate
    // guarantees SVE2 is enabled in callers.
    unsafe { _svabalt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
402#[doc = "Absolute difference long (top)"]
403#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u16])"]
404#[inline(always)]
405#[target_feature(enable = "sve,sve2")]
406#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
407#[cfg_attr(test, assert_instr(uabalt))]
408pub fn svabalt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t {
409 svabalt_u16(op1, op2, svdup_n_u8(op3))
410}
#[doc = "Absolute difference and accumulate long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uabalt))]
pub fn svabalt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv4i32")]
        fn _svabalt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature;
    // `as_signed`/`as_unsigned` only switch between the signed and unsigned
    // views of the same lanes to fit it, and the `#[target_feature]` gate
    // guarantees SVE2 is enabled in callers.
    unsafe { _svabalt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
424#[doc = "Absolute difference long (top)"]
425#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u32])"]
426#[inline(always)]
427#[target_feature(enable = "sve,sve2")]
428#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
429#[cfg_attr(test, assert_instr(uabalt))]
430pub fn svabalt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t {
431 svabalt_u32(op1, op2, svdup_n_u16(op3))
432}
#[doc = "Absolute difference and accumulate long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uabalt))]
pub fn svabalt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv2i64")]
        fn _svabalt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature;
    // `as_signed`/`as_unsigned` only switch between the signed and unsigned
    // views of the same lanes to fit it, and the `#[target_feature]` gate
    // guarantees SVE2 is enabled in callers.
    unsafe { _svabalt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
446#[doc = "Absolute difference long (top)"]
447#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u64])"]
448#[inline(always)]
449#[target_feature(enable = "sve,sve2")]
450#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
451#[cfg_attr(test, assert_instr(uabalt))]
452pub fn svabalt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t {
453 svabalt_u64(op1, op2, svdup_n_u32(op3))
454}
#[doc = "Absolute difference long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sabdlb))]
pub fn svabdlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv8i16")]
        fn _svabdlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature, and the
    // `#[target_feature]` gate guarantees SVE2 is enabled in callers.
    unsafe { _svabdlb_s16(op1, op2) }
}
468#[doc = "Absolute difference long (bottom)"]
469#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s16])"]
470#[inline(always)]
471#[target_feature(enable = "sve,sve2")]
472#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
473#[cfg_attr(test, assert_instr(sabdlb))]
474pub fn svabdlb_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
475 svabdlb_s16(op1, svdup_n_s8(op2))
476}
#[doc = "Absolute difference long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sabdlb))]
pub fn svabdlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv4i32")]
        fn _svabdlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature, and the
    // `#[target_feature]` gate guarantees SVE2 is enabled in callers.
    unsafe { _svabdlb_s32(op1, op2) }
}
490#[doc = "Absolute difference long (bottom)"]
491#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s32])"]
492#[inline(always)]
493#[target_feature(enable = "sve,sve2")]
494#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
495#[cfg_attr(test, assert_instr(sabdlb))]
496pub fn svabdlb_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
497 svabdlb_s32(op1, svdup_n_s16(op2))
498}
#[doc = "Absolute difference long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sabdlb))]
pub fn svabdlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv2i64")]
        fn _svabdlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature, and the
    // `#[target_feature]` gate guarantees SVE2 is enabled in callers.
    unsafe { _svabdlb_s64(op1, op2) }
}
512#[doc = "Absolute difference long (bottom)"]
513#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s64])"]
514#[inline(always)]
515#[target_feature(enable = "sve,sve2")]
516#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
517#[cfg_attr(test, assert_instr(sabdlb))]
518pub fn svabdlb_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
519 svabdlb_s64(op1, svdup_n_s32(op2))
520}
#[doc = "Absolute difference long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uabdlb))]
pub fn svabdlb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv8i16")]
        fn _svabdlb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature;
    // `as_signed`/`as_unsigned` only switch between the signed and unsigned
    // views of the same lanes to fit it, and the `#[target_feature]` gate
    // guarantees SVE2 is enabled in callers.
    unsafe { _svabdlb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
534#[doc = "Absolute difference long (bottom)"]
535#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u16])"]
536#[inline(always)]
537#[target_feature(enable = "sve,sve2")]
538#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
539#[cfg_attr(test, assert_instr(uabdlb))]
540pub fn svabdlb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
541 svabdlb_u16(op1, svdup_n_u8(op2))
542}
#[doc = "Absolute difference long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uabdlb))]
pub fn svabdlb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv4i32")]
        fn _svabdlb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature;
    // `as_signed`/`as_unsigned` only switch between the signed and unsigned
    // views of the same lanes to fit it, and the `#[target_feature]` gate
    // guarantees SVE2 is enabled in callers.
    unsafe { _svabdlb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
556#[doc = "Absolute difference long (bottom)"]
557#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u32])"]
558#[inline(always)]
559#[target_feature(enable = "sve,sve2")]
560#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
561#[cfg_attr(test, assert_instr(uabdlb))]
562pub fn svabdlb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t {
563 svabdlb_u32(op1, svdup_n_u16(op2))
564}
#[doc = "Absolute difference long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uabdlb))]
pub fn svabdlb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv2i64")]
        fn _svabdlb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature;
    // `as_signed`/`as_unsigned` only switch between the signed and unsigned
    // views of the same lanes to fit it, and the `#[target_feature]` gate
    // guarantees SVE2 is enabled in callers.
    unsafe { _svabdlb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
578#[doc = "Absolute difference long (bottom)"]
579#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u64])"]
580#[inline(always)]
581#[target_feature(enable = "sve,sve2")]
582#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
583#[cfg_attr(test, assert_instr(uabdlb))]
584pub fn svabdlb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
585 svabdlb_u64(op1, svdup_n_u32(op2))
586}
#[doc = "Absolute difference long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sabdlt))]
pub fn svabdlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv8i16")]
        fn _svabdlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature, and the
    // `#[target_feature]` gate guarantees SVE2 is enabled in callers.
    unsafe { _svabdlt_s16(op1, op2) }
}
600#[doc = "Absolute difference long (top)"]
601#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s16])"]
602#[inline(always)]
603#[target_feature(enable = "sve,sve2")]
604#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
605#[cfg_attr(test, assert_instr(sabdlt))]
606pub fn svabdlt_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
607 svabdlt_s16(op1, svdup_n_s8(op2))
608}
#[doc = "Absolute difference long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sabdlt))]
pub fn svabdlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // Declaration of the backing LLVM intrinsic; the "unadjusted" ABI hands
    // scalable-vector arguments to LLVM without any ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv4i32")]
        fn _svabdlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature, and the
    // `#[target_feature]` gate guarantees SVE2 is enabled in callers.
    unsafe { _svabdlt_s32(op1, op2) }
}
622#[doc = "Absolute difference long (top)"]
623#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s32])"]
624#[inline(always)]
625#[target_feature(enable = "sve,sve2")]
626#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
627#[cfg_attr(test, assert_instr(sabdlt))]
628pub fn svabdlt_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
629 svabdlt_s32(op1, svdup_n_s16(op2))
630}
631#[doc = "Absolute difference long (top)"]
632#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s64])"]
633#[inline(always)]
634#[target_feature(enable = "sve,sve2")]
635#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
636#[cfg_attr(test, assert_instr(sabdlt))]
637pub fn svabdlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
638 unsafe extern "unadjusted" {
639 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv2i64")]
640 fn _svabdlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
641 }
642 unsafe { _svabdlt_s64(op1, op2) }
643}
644#[doc = "Absolute difference long (top)"]
645#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s64])"]
646#[inline(always)]
647#[target_feature(enable = "sve,sve2")]
648#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
649#[cfg_attr(test, assert_instr(sabdlt))]
650pub fn svabdlt_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
651 svabdlt_s64(op1, svdup_n_s32(op2))
652}
#[doc = "Absolute difference long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uabdlt))]
pub fn svabdlt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
    // The UABDLT LLVM intrinsic is declared with signed vector types;
    // `as_signed`/`as_unsigned` below only reinterpret the lane bits.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv8i16")]
        fn _svabdlt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: types match the intrinsic declaration modulo bit-preserving
    // reinterprets, and SVE2 is guaranteed by `#[target_feature]`.
    unsafe { _svabdlt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Absolute difference long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uabdlt))]
pub fn svabdlt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
    // Scalar-operand form: splat `op2` into every lane, then reuse the vector form.
    svabdlt_u16(op1, svdup_n_u8(op2))
}
#[doc = "Absolute difference long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uabdlt))]
pub fn svabdlt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv4i32")]
        fn _svabdlt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: as for svabdlt_u16 above.
    unsafe { _svabdlt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Absolute difference long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uabdlt))]
pub fn svabdlt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t {
    // Scalar-operand form: splat `op2`, then reuse the vector form.
    svabdlt_u32(op1, svdup_n_u16(op2))
}
#[doc = "Absolute difference long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uabdlt))]
pub fn svabdlt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv2i64")]
        fn _svabdlt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: as for svabdlt_u16 above.
    unsafe { _svabdlt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Absolute difference long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uabdlt))]
pub fn svabdlt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
    // Scalar-operand form: splat `op2`, then reuse the vector form.
    svabdlt_u64(op1, svdup_n_u32(op2))
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sadalp))]
pub fn svadalp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t {
    // Merging (_m) form: the LLVM intrinsic takes an element-width-typed
    // predicate (svbool8_t), produced from the generic `pg` by `sve_into`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv8i16")]
        fn _svadalp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: types match the intrinsic declaration (predicate converted via
    // `sve_into`), and SVE2 is guaranteed by `#[target_feature]`.
    unsafe { _svadalp_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sadalp))]
pub fn svadalp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t {
    // "Don't care" (_x) form: inactive lanes are unspecified, so the merging
    // form is a valid implementation.
    svadalp_s16_m(pg, op1, op2)
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sadalp))]
pub fn svadalp_s16_z(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t {
    // Zeroing (_z) form: zero the inactive lanes of the accumulator first,
    // then apply the merging form.
    svadalp_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sadalp))]
pub fn svadalp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv4i32")]
        fn _svadalp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: as for svadalp_s16_m above.
    unsafe { _svadalp_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sadalp))]
pub fn svadalp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t {
    // "Don't care" form reuses the merging form.
    svadalp_s32_m(pg, op1, op2)
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sadalp))]
pub fn svadalp_s32_z(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t {
    // Zeroing form: zero inactive accumulator lanes, then merge.
    svadalp_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sadalp))]
pub fn svadalp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv2i64")]
        fn _svadalp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: as for svadalp_s16_m above.
    unsafe { _svadalp_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sadalp))]
pub fn svadalp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t {
    // "Don't care" form reuses the merging form.
    svadalp_s64_m(pg, op1, op2)
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sadalp))]
pub fn svadalp_s64_z(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t {
    // Zeroing form: zero inactive accumulator lanes, then merge.
    svadalp_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uadalp))]
pub fn svadalp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t {
    // Merging (_m) form. The UADALP LLVM intrinsic is declared with signed
    // vector types; `as_signed`/`as_unsigned` only reinterpret the lane bits,
    // and `sve_into` converts `pg` to the element-width predicate type.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv8i16")]
        fn _svadalp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: types match the intrinsic declaration modulo bit-preserving
    // reinterprets, and SVE2 is guaranteed by `#[target_feature]`.
    unsafe { _svadalp_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uadalp))]
pub fn svadalp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t {
    // "Don't care" (_x) form: inactive lanes are unspecified, so the merging
    // form is a valid implementation.
    svadalp_u16_m(pg, op1, op2)
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uadalp))]
pub fn svadalp_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t {
    // Zeroing (_z) form: zero inactive accumulator lanes, then merge.
    svadalp_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uadalp))]
pub fn svadalp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv4i32")]
        fn _svadalp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: as for svadalp_u16_m above.
    unsafe { _svadalp_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uadalp))]
pub fn svadalp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t {
    // "Don't care" form reuses the merging form.
    svadalp_u32_m(pg, op1, op2)
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uadalp))]
pub fn svadalp_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t {
    // Zeroing form: zero inactive accumulator lanes, then merge.
    svadalp_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uadalp))]
pub fn svadalp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv2i64")]
        fn _svadalp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: as for svadalp_u16_m above.
    unsafe { _svadalp_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uadalp))]
pub fn svadalp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t {
    // "Don't care" form reuses the merging form.
    svadalp_u64_m(pg, op1, op2)
}
#[doc = "Add and accumulate long pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uadalp))]
pub fn svadalp_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t {
    // Zeroing form: zero inactive accumulator lanes, then merge.
    svadalp_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Add with carry long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adclb))]
pub fn svadclb_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    // The ADCLB LLVM intrinsic is declared with signed vector types;
    // `as_signed`/`as_unsigned` only reinterpret the lane bits.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclb.nxv4i32")]
        fn _svadclb_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: types match the intrinsic declaration modulo bit-preserving
    // reinterprets, and SVE2 is guaranteed by `#[target_feature]`.
    unsafe { _svadclb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Add with carry long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adclb))]
pub fn svadclb_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    // Scalar-operand form: splat `op3` into every lane, then reuse the vector form.
    svadclb_u32(op1, op2, svdup_n_u32(op3))
}
#[doc = "Add with carry long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adclb))]
pub fn svadclb_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclb.nxv2i64")]
        fn _svadclb_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: as for svadclb_u32 above.
    unsafe { _svadclb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Add with carry long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adclb))]
pub fn svadclb_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    // Scalar-operand form: splat `op3`, then reuse the vector form.
    svadclb_u64(op1, op2, svdup_n_u64(op3))
}
#[doc = "Add with carry long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adclt))]
pub fn svadclt_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    // The ADCLT LLVM intrinsic is declared with signed vector types;
    // `as_signed`/`as_unsigned` only reinterpret the lane bits.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclt.nxv4i32")]
        fn _svadclt_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: types match the intrinsic declaration modulo bit-preserving
    // reinterprets, and SVE2 is guaranteed by `#[target_feature]`.
    unsafe { _svadclt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Add with carry long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adclt))]
pub fn svadclt_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    // Scalar-operand form: splat `op3` into every lane, then reuse the vector form.
    svadclt_u32(op1, op2, svdup_n_u32(op3))
}
#[doc = "Add with carry long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adclt))]
pub fn svadclt_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclt.nxv2i64")]
        fn _svadclt_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: as for svadclt_u32 above.
    unsafe { _svadclt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Add with carry long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adclt))]
pub fn svadclt_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    // Scalar-operand form: splat `op3`, then reuse the vector form.
    svadclt_u64(op1, op2, svdup_n_u64(op3))
}
#[doc = "Add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnb))]
pub fn svaddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t {
    // Declaration of the backing LLVM intrinsic; "unadjusted" keeps the
    // scalable-vector ABI unmodified.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv8i16")]
        fn _svaddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t;
    }
    // SAFETY: types match the intrinsic declaration, and SVE2 is guaranteed
    // by `#[target_feature]` on this function.
    unsafe { _svaddhnb_s16(op1, op2) }
}
#[doc = "Add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnb))]
pub fn svaddhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t {
    // Scalar-operand form: splat `op2` into every lane, then reuse the vector form.
    svaddhnb_s16(op1, svdup_n_s16(op2))
}
#[doc = "Add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnb))]
pub fn svaddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv4i32")]
        fn _svaddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t;
    }
    // SAFETY: as for svaddhnb_s16 above.
    unsafe { _svaddhnb_s32(op1, op2) }
}
#[doc = "Add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnb))]
pub fn svaddhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t {
    // Scalar-operand form: splat `op2`, then reuse the vector form.
    svaddhnb_s32(op1, svdup_n_s32(op2))
}
#[doc = "Add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnb))]
pub fn svaddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv2i64")]
        fn _svaddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t;
    }
    // SAFETY: as for svaddhnb_s16 above.
    unsafe { _svaddhnb_s64(op1, op2) }
}
#[doc = "Add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnb))]
pub fn svaddhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t {
    // Scalar-operand form: splat `op2`, then reuse the vector form.
    svaddhnb_s64(op1, svdup_n_s64(op2))
}
#[doc = "Add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnb))]
pub fn svaddhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t {
    // Reuses the signed wrapper: both signednesses map to the same ADDHNB
    // instruction (see the identical `assert_instr`), and the
    // `as_signed`/`as_unsigned` casts only reinterpret lane bits.
    // SAFETY: the reinterprets are bit-preserving.
    unsafe { svaddhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnb))]
pub fn svaddhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t {
    // Scalar-operand form: splat `op2` into every lane, then reuse the vector form.
    svaddhnb_u16(op1, svdup_n_u16(op2))
}
#[doc = "Add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnb))]
pub fn svaddhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t {
    // SAFETY: as for svaddhnb_u16 above — bit-preserving reinterprets only.
    unsafe { svaddhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnb))]
pub fn svaddhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t {
    // Scalar-operand form: splat `op2`, then reuse the vector form.
    svaddhnb_u32(op1, svdup_n_u32(op2))
}
#[doc = "Add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnb))]
pub fn svaddhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t {
    // SAFETY: as for svaddhnb_u16 above — bit-preserving reinterprets only.
    unsafe { svaddhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnb))]
pub fn svaddhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t {
    // Scalar-operand form: splat `op2`, then reuse the vector form.
    svaddhnb_u64(op1, svdup_n_u64(op2))
}
#[doc = "Add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnt))]
pub fn svaddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t {
    // Top variant: the narrowed results land in the odd lanes; `even` carries
    // the pre-existing even-lane values through to the result.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv8i16")]
        fn _svaddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t;
    }
    // SAFETY: types match the intrinsic declaration, and SVE2 is guaranteed
    // by `#[target_feature]` on this function.
    unsafe { _svaddhnt_s16(even, op1, op2) }
}
#[doc = "Add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnt))]
pub fn svaddhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t {
    // Scalar-operand form: splat `op2` into every lane, then reuse the vector form.
    svaddhnt_s16(even, op1, svdup_n_s16(op2))
}
#[doc = "Add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnt))]
pub fn svaddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv4i32")]
        fn _svaddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t;
    }
    // SAFETY: as for svaddhnt_s16 above.
    unsafe { _svaddhnt_s32(even, op1, op2) }
}
#[doc = "Add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnt))]
pub fn svaddhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t {
    // Scalar-operand form: splat `op2`, then reuse the vector form.
    svaddhnt_s32(even, op1, svdup_n_s32(op2))
}
#[doc = "Add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnt))]
pub fn svaddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv2i64")]
        fn _svaddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t;
    }
    // SAFETY: as for svaddhnt_s16 above.
    unsafe { _svaddhnt_s64(even, op1, op2) }
}
#[doc = "Add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnt))]
pub fn svaddhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t {
    // Scalar-operand form: splat `op2`, then reuse the vector form.
    svaddhnt_s64(even, op1, svdup_n_s64(op2))
}
#[doc = "Add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnt))]
pub fn svaddhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t {
    // Reuses the signed wrapper: both signednesses map to the same ADDHNT
    // instruction (see the identical `assert_instr`), and the
    // `as_signed`/`as_unsigned` casts only reinterpret lane bits.
    // SAFETY: the reinterprets are bit-preserving.
    unsafe { svaddhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnt))]
pub fn svaddhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t {
    // Scalar-operand form: splat `op2` into every lane, then reuse the vector form.
    svaddhnt_u16(even, op1, svdup_n_u16(op2))
}
#[doc = "Add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnt))]
pub fn svaddhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t {
    // SAFETY: as for svaddhnt_u16 above — bit-preserving reinterprets only.
    unsafe { svaddhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnt))]
pub fn svaddhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t {
    // Scalar-operand form: splat `op2`, then reuse the vector form.
    svaddhnt_u32(even, op1, svdup_n_u32(op2))
}
#[doc = "Add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnt))]
pub fn svaddhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t {
    // SAFETY: as for svaddhnt_u16 above — bit-preserving reinterprets only.
    unsafe { svaddhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addhnt))]
pub fn svaddhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t {
    // Scalar-operand form: splat `op2`, then reuse the vector form.
    svaddhnt_u64(even, op1, svdup_n_u64(op2))
}
#[doc = "Add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlb))]
pub fn svaddlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
    // Direct binding to the LLVM SADDLB intrinsic for 8-bit -> 16-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv8i16")]
        fn _svaddlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: argument and return types match the declaration above exactly.
    unsafe { _svaddlb_s16(op1, op2) }
}
#[doc = "Add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlb))]
pub fn svaddlb_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddlb_s16(op1, svdup_n_s8(op2))
}
#[doc = "Add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlb))]
pub fn svaddlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // Direct binding to the LLVM SADDLB intrinsic for 16-bit -> 32-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv4i32")]
        fn _svaddlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: argument and return types match the declaration above exactly.
    unsafe { _svaddlb_s32(op1, op2) }
}
#[doc = "Add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlb))]
pub fn svaddlb_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddlb_s32(op1, svdup_n_s16(op2))
}
#[doc = "Add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlb))]
pub fn svaddlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
    // Direct binding to the LLVM SADDLB intrinsic for 32-bit -> 64-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv2i64")]
        fn _svaddlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: argument and return types match the declaration above exactly.
    unsafe { _svaddlb_s64(op1, op2) }
}
#[doc = "Add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlb))]
pub fn svaddlb_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddlb_s64(op1, svdup_n_s32(op2))
}
#[doc = "Add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddlb))]
pub fn svaddlb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
    // The LLVM declaration uses signed vector types even for UADDLB; the
    // as_signed()/as_unsigned() calls are pure reinterpret casts of lane bits.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv8i16")]
        fn _svaddlb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    unsafe { _svaddlb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddlb))]
pub fn svaddlb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddlb_u16(op1, svdup_n_u8(op2))
}
#[doc = "Add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddlb))]
pub fn svaddlb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
    // Same reinterpret-cast pattern as svaddlb_u16, for 16-bit -> 32-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv4i32")]
        fn _svaddlb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    unsafe { _svaddlb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddlb))]
pub fn svaddlb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddlb_u32(op1, svdup_n_u16(op2))
}
#[doc = "Add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddlb))]
pub fn svaddlb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
    // Same reinterpret-cast pattern as svaddlb_u16, for 32-bit -> 64-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv2i64")]
        fn _svaddlb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    unsafe { _svaddlb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddlb))]
pub fn svaddlb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddlb_u64(op1, svdup_n_u32(op2))
}
#[doc = "Add long (bottom + top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlbt))]
pub fn svaddlbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
    // Direct binding to the LLVM SADDLBT intrinsic for 8-bit -> 16-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.saddlbt.nxv8i16"
        )]
        fn _svaddlbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: argument and return types match the declaration above exactly.
    unsafe { _svaddlbt_s16(op1, op2) }
}
#[doc = "Add long (bottom + top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlbt))]
pub fn svaddlbt_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddlbt_s16(op1, svdup_n_s8(op2))
}
#[doc = "Add long (bottom + top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlbt))]
pub fn svaddlbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // Direct binding to the LLVM SADDLBT intrinsic for 16-bit -> 32-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.saddlbt.nxv4i32"
        )]
        fn _svaddlbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: argument and return types match the declaration above exactly.
    unsafe { _svaddlbt_s32(op1, op2) }
}
#[doc = "Add long (bottom + top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlbt))]
pub fn svaddlbt_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddlbt_s32(op1, svdup_n_s16(op2))
}
#[doc = "Add long (bottom + top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlbt))]
pub fn svaddlbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
    // Direct binding to the LLVM SADDLBT intrinsic for 32-bit -> 64-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.saddlbt.nxv2i64"
        )]
        fn _svaddlbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: argument and return types match the declaration above exactly.
    unsafe { _svaddlbt_s64(op1, op2) }
}
#[doc = "Add long (bottom + top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlbt))]
pub fn svaddlbt_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddlbt_s64(op1, svdup_n_s32(op2))
}
#[doc = "Add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlt))]
pub fn svaddlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
    // Direct binding to the LLVM SADDLT intrinsic for 8-bit -> 16-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv8i16")]
        fn _svaddlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: argument and return types match the declaration above exactly.
    unsafe { _svaddlt_s16(op1, op2) }
}
#[doc = "Add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlt))]
pub fn svaddlt_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddlt_s16(op1, svdup_n_s8(op2))
}
#[doc = "Add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlt))]
pub fn svaddlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // Direct binding to the LLVM SADDLT intrinsic for 16-bit -> 32-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv4i32")]
        fn _svaddlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: argument and return types match the declaration above exactly.
    unsafe { _svaddlt_s32(op1, op2) }
}
#[doc = "Add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlt))]
pub fn svaddlt_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddlt_s32(op1, svdup_n_s16(op2))
}
#[doc = "Add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlt))]
pub fn svaddlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
    // Direct binding to the LLVM SADDLT intrinsic for 32-bit -> 64-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv2i64")]
        fn _svaddlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: argument and return types match the declaration above exactly.
    unsafe { _svaddlt_s64(op1, op2) }
}
#[doc = "Add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddlt))]
pub fn svaddlt_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddlt_s64(op1, svdup_n_s32(op2))
}
#[doc = "Add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddlt))]
pub fn svaddlt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
    // The LLVM declaration uses signed vector types even for UADDLT; the
    // as_signed()/as_unsigned() calls are pure reinterpret casts of lane bits.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv8i16")]
        fn _svaddlt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    unsafe { _svaddlt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddlt))]
pub fn svaddlt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddlt_u16(op1, svdup_n_u8(op2))
}
#[doc = "Add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddlt))]
pub fn svaddlt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
    // Same reinterpret-cast pattern as svaddlt_u16, for 16-bit -> 32-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv4i32")]
        fn _svaddlt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    unsafe { _svaddlt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddlt))]
pub fn svaddlt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddlt_u32(op1, svdup_n_u16(op2))
}
#[doc = "Add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddlt))]
pub fn svaddlt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
    // Same reinterpret-cast pattern as svaddlt_u16, for 32-bit -> 64-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv2i64")]
        fn _svaddlt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    unsafe { _svaddlt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddlt))]
pub fn svaddlt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddlt_u64(op1, svdup_n_u32(op2))
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn svaddp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Direct binding to the LLVM FADDP intrinsic for 32-bit float lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddp.nxv4f32")]
        fn _svaddp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // sve_into() converts the generic svbool_t predicate into the
    // element-width-specific predicate type the intrinsic expects.
    unsafe { _svaddp_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn svaddp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // The "don't care" (_x) variant reuses the merging (_m) implementation.
    svaddp_f32_m(pg, op1, op2)
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn svaddp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Direct binding to the LLVM FADDP intrinsic for 64-bit float lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddp.nxv2f64")]
        fn _svaddp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // Predicate narrowed to the 64-bit-lane predicate type via sve_into().
    unsafe { _svaddp_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn svaddp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // The "don't care" (_x) variant reuses the merging (_m) implementation.
    svaddp_f64_m(pg, op1, op2)
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addp))]
pub fn svaddp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Direct binding to the LLVM ADDP intrinsic for 8-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv16i8")]
        fn _svaddp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // For 8-bit lanes the predicate granularity already matches svbool_t,
    // so pg is passed through without conversion.
    unsafe { _svaddp_s8_m(pg, op1, op2) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addp))]
pub fn svaddp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // The "don't care" (_x) variant reuses the merging (_m) implementation.
    svaddp_s8_m(pg, op1, op2)
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addp))]
pub fn svaddp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Direct binding to the LLVM ADDP intrinsic for 16-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv8i16")]
        fn _svaddp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // Predicate narrowed to the 16-bit-lane predicate type via sve_into().
    unsafe { _svaddp_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addp))]
pub fn svaddp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // The "don't care" (_x) variant reuses the merging (_m) implementation.
    svaddp_s16_m(pg, op1, op2)
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addp))]
pub fn svaddp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Direct binding to the LLVM ADDP intrinsic for 32-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv4i32")]
        fn _svaddp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // Predicate narrowed to the 32-bit-lane predicate type via sve_into().
    unsafe { _svaddp_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addp))]
pub fn svaddp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // The "don't care" (_x) variant reuses the merging (_m) implementation.
    svaddp_s32_m(pg, op1, op2)
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addp))]
pub fn svaddp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Direct binding to the LLVM ADDP intrinsic for 64-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv2i64")]
        fn _svaddp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // Predicate narrowed to the 64-bit-lane predicate type via sve_into().
    unsafe { _svaddp_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addp))]
pub fn svaddp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // The "don't care" (_x) variant reuses the merging (_m) implementation.
    svaddp_s64_m(pg, op1, op2)
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addp))]
pub fn svaddp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // ADDP is bitwise-identical for signed and unsigned lanes; reinterpret,
    // delegate to the signed form, and reinterpret the result back.
    unsafe { svaddp_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addp))]
pub fn svaddp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // The "don't care" (_x) variant reuses the merging (_m) implementation.
    svaddp_u8_m(pg, op1, op2)
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addp))]
pub fn svaddp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Reinterpret-and-delegate to the signed form; lane bits are unchanged.
    unsafe { svaddp_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addp))]
pub fn svaddp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // The "don't care" (_x) variant reuses the merging (_m) implementation.
    svaddp_u16_m(pg, op1, op2)
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addp))]
pub fn svaddp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Reinterpret-and-delegate to the signed form; lane bits are unchanged.
    unsafe { svaddp_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addp))]
pub fn svaddp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // The "don't care" (_x) variant reuses the merging (_m) implementation.
    svaddp_u32_m(pg, op1, op2)
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addp))]
pub fn svaddp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Reinterpret-and-delegate to the signed form; lane bits are unchanged.
    unsafe { svaddp_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(addp))]
pub fn svaddp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // The "don't care" (_x) variant reuses the merging (_m) implementation.
    svaddp_u64_m(pg, op1, op2)
}
#[doc = "Add wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddwb))]
pub fn svaddwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t {
    // Direct binding to the LLVM SADDWB intrinsic (wide 16-bit + narrow 8-bit).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv8i16")]
        fn _svaddwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: argument and return types match the declaration above exactly.
    unsafe { _svaddwb_s16(op1, op2) }
}
#[doc = "Add wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddwb))]
pub fn svaddwb_n_s16(op1: svint16_t, op2: i8) -> svint16_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddwb_s16(op1, svdup_n_s8(op2))
}
#[doc = "Add wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddwb))]
pub fn svaddwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t {
    // Direct binding to the LLVM SADDWB intrinsic (wide 32-bit + narrow 16-bit).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv4i32")]
        fn _svaddwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: argument and return types match the declaration above exactly.
    unsafe { _svaddwb_s32(op1, op2) }
}
#[doc = "Add wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddwb))]
pub fn svaddwb_n_s32(op1: svint32_t, op2: i16) -> svint32_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddwb_s32(op1, svdup_n_s16(op2))
}
#[doc = "Add wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddwb))]
pub fn svaddwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t {
    // Direct binding to the LLVM SADDWB intrinsic (wide 64-bit + narrow 32-bit).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv2i64")]
        fn _svaddwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: argument and return types match the declaration above exactly.
    unsafe { _svaddwb_s64(op1, op2) }
}
#[doc = "Add wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddwb))]
pub fn svaddwb_n_s64(op1: svint64_t, op2: i32) -> svint64_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddwb_s64(op1, svdup_n_s32(op2))
}
#[doc = "Add wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddwb))]
pub fn svaddwb_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t {
    // The LLVM declaration uses signed vector types even for UADDWB; the
    // as_signed()/as_unsigned() calls are pure reinterpret casts of lane bits.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv8i16")]
        fn _svaddwb_u16(op1: svint16_t, op2: svint8_t) -> svint16_t;
    }
    unsafe { _svaddwb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddwb))]
pub fn svaddwb_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddwb_u16(op1, svdup_n_u8(op2))
}
#[doc = "Add wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddwb))]
pub fn svaddwb_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t {
    // Same reinterpret-cast pattern as svaddwb_u16, for 32-bit wide lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv4i32")]
        fn _svaddwb_u32(op1: svint32_t, op2: svint16_t) -> svint32_t;
    }
    unsafe { _svaddwb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddwb))]
pub fn svaddwb_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddwb_u32(op1, svdup_n_u16(op2))
}
#[doc = "Add wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddwb))]
pub fn svaddwb_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t {
    // Same reinterpret-cast pattern as svaddwb_u16, for 64-bit wide lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv2i64")]
        fn _svaddwb_u64(op1: svint64_t, op2: svint32_t) -> svint64_t;
    }
    unsafe { _svaddwb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddwb))]
pub fn svaddwb_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t {
    // Scalar variant: splat op2 across all lanes and defer to the vector form.
    svaddwb_u64(op1, svdup_n_u32(op2))
}
#[doc = "Add wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddwt))]
pub fn svaddwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv8i16")]
        fn _svaddwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: SVE2 availability is guaranteed by this function's
    // #[target_feature] contract.
    unsafe { _svaddwt_s16(op1, op2) }
}
#[doc = "Add wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddwt))]
pub fn svaddwt_n_s16(op1: svint16_t, op2: i8) -> svint16_t {
    // Broadcast the scalar operand and defer to the vector form.
    svaddwt_s16(op1, svdup_n_s8(op2))
}
#[doc = "Add wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddwt))]
pub fn svaddwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv4i32")]
        fn _svaddwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: SVE2 availability is guaranteed by this function's
    // #[target_feature] contract.
    unsafe { _svaddwt_s32(op1, op2) }
}
#[doc = "Add wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddwt))]
pub fn svaddwt_n_s32(op1: svint32_t, op2: i16) -> svint32_t {
    // Broadcast the scalar operand and defer to the vector form.
    svaddwt_s32(op1, svdup_n_s16(op2))
}
#[doc = "Add wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddwt))]
pub fn svaddwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv2i64")]
        fn _svaddwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: SVE2 availability is guaranteed by this function's
    // #[target_feature] contract.
    unsafe { _svaddwt_s64(op1, op2) }
}
#[doc = "Add wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddwt))]
pub fn svaddwt_n_s64(op1: svint64_t, op2: i32) -> svint64_t {
    // Broadcast the scalar operand and defer to the vector form.
    svaddwt_s64(op1, svdup_n_s32(op2))
}
#[doc = "Add wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddwt))]
pub fn svaddwt_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv8i16")]
        fn _svaddwt_u16(op1: svint16_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: SVE2 is guaranteed by #[target_feature]; the signed/unsigned casts
    // are same-width bit reinterpretations (the LLVM intrinsic is declared on
    // signed element types only).
    unsafe { _svaddwt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddwt))]
pub fn svaddwt_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t {
    // Broadcast the scalar operand and defer to the vector form.
    svaddwt_u16(op1, svdup_n_u8(op2))
}
#[doc = "Add wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddwt))]
pub fn svaddwt_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv4i32")]
        fn _svaddwt_u32(op1: svint32_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: SVE2 is guaranteed by #[target_feature]; the signed/unsigned casts
    // are same-width bit reinterpretations (the LLVM intrinsic is declared on
    // signed element types only).
    unsafe { _svaddwt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddwt))]
pub fn svaddwt_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t {
    // Broadcast the scalar operand and defer to the vector form.
    svaddwt_u32(op1, svdup_n_u16(op2))
}
#[doc = "Add wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddwt))]
pub fn svaddwt_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv2i64")]
        fn _svaddwt_u64(op1: svint64_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: SVE2 is guaranteed by #[target_feature]; the signed/unsigned casts
    // are same-width bit reinterpretations (the LLVM intrinsic is declared on
    // signed element types only).
    unsafe { _svaddwt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddwt))]
pub fn svaddwt_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t {
    // Broadcast the scalar operand and defer to the vector form.
    svaddwt_u64(op1, svdup_n_u32(op2))
}
#[doc = "AES single round decryption"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesd[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(aesd))]
pub fn svaesd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        // NOTE: the AES intrinsics take no vector-type suffix in their LLVM name.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesd")]
        fn _svaesd_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: sve2-aes is guaranteed by #[target_feature]; the signed/unsigned
    // casts are same-width bit reinterpretations (the LLVM intrinsic is declared
    // on signed element types only).
    unsafe { _svaesd_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "AES single round encryption"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaese[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(aese))]
pub fn svaese_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aese")]
        fn _svaese_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: sve2-aes is guaranteed by #[target_feature]; the signed/unsigned
    // casts are same-width bit reinterpretations.
    unsafe { _svaese_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "AES inverse mix columns"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesimc[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(aesimc))]
pub fn svaesimc_u8(op: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesimc")]
        fn _svaesimc_u8(op: svint8_t) -> svint8_t;
    }
    // SAFETY: sve2-aes is guaranteed by #[target_feature]; the signed/unsigned
    // casts are same-width bit reinterpretations.
    unsafe { _svaesimc_u8(op.as_signed()).as_unsigned() }
}
#[doc = "AES mix columns"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesmc[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(aesmc))]
pub fn svaesmc_u8(op: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesmc")]
        fn _svaesmc_u8(op: svint8_t) -> svint8_t;
    }
    // SAFETY: sve2-aes is guaranteed by #[target_feature]; the signed/unsigned
    // casts are same-width bit reinterpretations.
    unsafe { _svaesmc_u8(op.as_signed()).as_unsigned() }
}
#[doc = "Bitwise clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn svbcax_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv16i8")]
        fn _svbcax_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    // SAFETY: SVE2 availability is guaranteed by this function's
    // #[target_feature] contract.
    unsafe { _svbcax_s8(op1, op2, op3) }
}
#[doc = "Bitwise clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn svbcax_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    // Broadcast the scalar operand and defer to the vector form.
    svbcax_s8(op1, op2, svdup_n_s8(op3))
}
#[doc = "Bitwise clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn svbcax_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv8i16")]
        fn _svbcax_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    // SAFETY: SVE2 availability is guaranteed by this function's
    // #[target_feature] contract.
    unsafe { _svbcax_s16(op1, op2, op3) }
}
#[doc = "Bitwise clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn svbcax_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    // Broadcast the scalar operand and defer to the vector form.
    svbcax_s16(op1, op2, svdup_n_s16(op3))
}
#[doc = "Bitwise clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn svbcax_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv4i32")]
        fn _svbcax_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: SVE2 availability is guaranteed by this function's
    // #[target_feature] contract.
    unsafe { _svbcax_s32(op1, op2, op3) }
}
#[doc = "Bitwise clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn svbcax_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    // Broadcast the scalar operand and defer to the vector form.
    svbcax_s32(op1, op2, svdup_n_s32(op3))
}
#[doc = "Bitwise clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn svbcax_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv2i64")]
        fn _svbcax_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: SVE2 availability is guaranteed by this function's
    // #[target_feature] contract.
    unsafe { _svbcax_s64(op1, op2, op3) }
}
#[doc = "Bitwise clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn svbcax_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    // Broadcast the scalar operand and defer to the vector form.
    svbcax_s64(op1, op2, svdup_n_s64(op3))
}
#[doc = "Bitwise clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn svbcax_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    // SAFETY: same-width signed/unsigned bit reinterpretation around the
    // signed implementation (bcax is a pure bitwise operation).
    unsafe { svbcax_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn svbcax_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    // Broadcast the scalar operand and defer to the vector form.
    svbcax_u8(op1, op2, svdup_n_u8(op3))
}
#[doc = "Bitwise clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn svbcax_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    // SAFETY: same-width signed/unsigned bit reinterpretation around the
    // signed implementation (bcax is a pure bitwise operation).
    unsafe { svbcax_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn svbcax_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    // Broadcast the scalar operand and defer to the vector form.
    svbcax_u16(op1, op2, svdup_n_u16(op3))
}
#[doc = "Bitwise clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn svbcax_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    // SAFETY: same-width signed/unsigned bit reinterpretation around the
    // signed implementation (bcax is a pure bitwise operation).
    unsafe { svbcax_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn svbcax_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    // Broadcast the scalar operand and defer to the vector form.
    svbcax_u32(op1, op2, svdup_n_u32(op3))
}
#[doc = "Bitwise clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn svbcax_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    // SAFETY: same-width signed/unsigned bit reinterpretation around the
    // signed implementation (bcax is a pure bitwise operation).
    unsafe { svbcax_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn svbcax_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    // Broadcast the scalar operand and defer to the vector form.
    svbcax_u64(op1, op2, svdup_n_u64(op3))
}
#[doc = "Scatter lower bits into positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bdep))]
pub fn svbdep_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv16i8")]
        fn _svbdep_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: sve2-bitperm is guaranteed by #[target_feature]; the
    // signed/unsigned casts are same-width bit reinterpretations (the LLVM
    // intrinsic is declared on signed element types only).
    unsafe { _svbdep_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Scatter lower bits into positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bdep))]
pub fn svbdep_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
    // Broadcast the scalar operand and defer to the vector form.
    svbdep_u8(op1, svdup_n_u8(op2))
}
#[doc = "Scatter lower bits into positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bdep))]
pub fn svbdep_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv8i16")]
        fn _svbdep_u16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: sve2-bitperm is guaranteed by #[target_feature]; the
    // signed/unsigned casts are same-width bit reinterpretations.
    unsafe { _svbdep_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Scatter lower bits into positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bdep))]
pub fn svbdep_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
    // Broadcast the scalar operand and defer to the vector form.
    svbdep_u16(op1, svdup_n_u16(op2))
}
#[doc = "Scatter lower bits into positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bdep))]
pub fn svbdep_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv4i32")]
        fn _svbdep_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: sve2-bitperm is guaranteed by #[target_feature]; the
    // signed/unsigned casts are same-width bit reinterpretations.
    unsafe { _svbdep_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Scatter lower bits into positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bdep))]
pub fn svbdep_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
    // Broadcast the scalar operand and defer to the vector form.
    svbdep_u32(op1, svdup_n_u32(op2))
}
#[doc = "Scatter lower bits into positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bdep))]
pub fn svbdep_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv2i64")]
        fn _svbdep_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: sve2-bitperm is guaranteed by #[target_feature]; the
    // signed/unsigned casts are same-width bit reinterpretations.
    unsafe { _svbdep_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Scatter lower bits into positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bdep))]
pub fn svbdep_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
    // Broadcast the scalar operand and defer to the vector form.
    svbdep_u64(op1, svdup_n_u64(op2))
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv16i8")]
        fn _svbext_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: sve2-bitperm is guaranteed by #[target_feature]; the
    // signed/unsigned casts are same-width bit reinterpretations (the LLVM
    // intrinsic is declared on signed element types only).
    unsafe { _svbext_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
    // Broadcast the scalar operand and defer to the vector form.
    svbext_u8(op1, svdup_n_u8(op2))
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv8i16")]
        fn _svbext_u16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: sve2-bitperm is guaranteed by #[target_feature]; the
    // signed/unsigned casts are same-width bit reinterpretations.
    unsafe { _svbext_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
    // Broadcast the scalar operand and defer to the vector form.
    svbext_u16(op1, svdup_n_u16(op2))
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv4i32")]
        fn _svbext_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: sve2-bitperm is guaranteed by #[target_feature]; the
    // signed/unsigned casts are same-width bit reinterpretations.
    unsafe { _svbext_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
    // Broadcast the scalar operand and defer to the vector form.
    svbext_u32(op1, svdup_n_u32(op2))
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv2i64")]
        fn _svbext_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: sve2-bitperm is guaranteed by #[target_feature]; the
    // signed/unsigned casts are same-width bit reinterpretations.
    unsafe { _svbext_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Gather lower bits from positions selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bext))]
pub fn svbext_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
    // Broadcast the scalar operand and defer to the vector form.
    svbext_u64(op1, svdup_n_u64(op2))
}
2428#[doc = "Group bits to right or left as selected by bitmask"]
2429#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u8])"]
2430#[inline(always)]
2431#[target_feature(enable = "sve,sve2,sve2-bitperm")]
2432#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2433#[cfg_attr(test, assert_instr(bgrp))]
2434pub fn svbgrp_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
2435 unsafe extern "unadjusted" {
2436 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv16i8")]
2437 fn _svbgrp_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
2438 }
2439 unsafe { _svbgrp_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
2440}
2441#[doc = "Group bits to right or left as selected by bitmask"]
2442#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u8])"]
2443#[inline(always)]
2444#[target_feature(enable = "sve,sve2,sve2-bitperm")]
2445#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2446#[cfg_attr(test, assert_instr(bgrp))]
2447pub fn svbgrp_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
2448 svbgrp_u8(op1, svdup_n_u8(op2))
2449}
2450#[doc = "Group bits to right or left as selected by bitmask"]
2451#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u16])"]
2452#[inline(always)]
2453#[target_feature(enable = "sve,sve2,sve2-bitperm")]
2454#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2455#[cfg_attr(test, assert_instr(bgrp))]
2456pub fn svbgrp_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
2457 unsafe extern "unadjusted" {
2458 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv8i16")]
2459 fn _svbgrp_u16(op1: svint16_t, op2: svint16_t) -> svint16_t;
2460 }
2461 unsafe { _svbgrp_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
2462}
2463#[doc = "Group bits to right or left as selected by bitmask"]
2464#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u16])"]
2465#[inline(always)]
2466#[target_feature(enable = "sve,sve2,sve2-bitperm")]
2467#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2468#[cfg_attr(test, assert_instr(bgrp))]
2469pub fn svbgrp_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
2470 svbgrp_u16(op1, svdup_n_u16(op2))
2471}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
pub fn svbgrp_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv4i32")]
        fn _svbgrp_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: required features are enabled via #[target_feature];
    // signed/unsigned lane reinterpretation is a pure bit-cast.
    unsafe { _svbgrp_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
2485#[doc = "Group bits to right or left as selected by bitmask"]
2486#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u32])"]
2487#[inline(always)]
2488#[target_feature(enable = "sve,sve2,sve2-bitperm")]
2489#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2490#[cfg_attr(test, assert_instr(bgrp))]
2491pub fn svbgrp_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
2492 svbgrp_u32(op1, svdup_n_u32(op2))
2493}
#[doc = "Group bits to right or left as selected by bitmask"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-bitperm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bgrp))]
pub fn svbgrp_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv2i64")]
        fn _svbgrp_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: required features are enabled via #[target_feature];
    // signed/unsigned lane reinterpretation is a pure bit-cast.
    unsafe { _svbgrp_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
2507#[doc = "Group bits to right or left as selected by bitmask"]
2508#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u64])"]
2509#[inline(always)]
2510#[target_feature(enable = "sve,sve2,sve2-bitperm")]
2511#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2512#[cfg_attr(test, assert_instr(bgrp))]
2513pub fn svbgrp_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
2514 svbgrp_u64(op1, svdup_n_u64(op2))
2515}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv16i8")]
        fn _svbsl1n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    // SAFETY: sve+sve2 are enabled via #[target_feature].
    unsafe { _svbsl1n_s8(op1, op2, op3) }
}
2529#[doc = "Bitwise select with first input inverted"]
2530#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s8])"]
2531#[inline(always)]
2532#[target_feature(enable = "sve,sve2")]
2533#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2534#[cfg_attr(test, assert_instr(bsl1n))]
2535pub fn svbsl1n_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
2536 svbsl1n_s8(op1, op2, svdup_n_s8(op3))
2537}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv8i16")]
        fn _svbsl1n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    // SAFETY: sve+sve2 are enabled via #[target_feature].
    unsafe { _svbsl1n_s16(op1, op2, op3) }
}
2551#[doc = "Bitwise select with first input inverted"]
2552#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s16])"]
2553#[inline(always)]
2554#[target_feature(enable = "sve,sve2")]
2555#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2556#[cfg_attr(test, assert_instr(bsl1n))]
2557pub fn svbsl1n_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
2558 svbsl1n_s16(op1, op2, svdup_n_s16(op3))
2559}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv4i32")]
        fn _svbsl1n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: sve+sve2 are enabled via #[target_feature].
    unsafe { _svbsl1n_s32(op1, op2, op3) }
}
2573#[doc = "Bitwise select with first input inverted"]
2574#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s32])"]
2575#[inline(always)]
2576#[target_feature(enable = "sve,sve2")]
2577#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2578#[cfg_attr(test, assert_instr(bsl1n))]
2579pub fn svbsl1n_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
2580 svbsl1n_s32(op1, op2, svdup_n_s32(op3))
2581}
#[doc = "Bitwise select with first input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl1n))]
pub fn svbsl1n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv2i64")]
        fn _svbsl1n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: sve+sve2 are enabled via #[target_feature].
    unsafe { _svbsl1n_s64(op1, op2, op3) }
}
2595#[doc = "Bitwise select with first input inverted"]
2596#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s64])"]
2597#[inline(always)]
2598#[target_feature(enable = "sve,sve2")]
2599#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2600#[cfg_attr(test, assert_instr(bsl1n))]
2601pub fn svbsl1n_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
2602 svbsl1n_s64(op1, op2, svdup_n_s64(op3))
2603}
2604#[doc = "Bitwise select with first input inverted"]
2605#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u8])"]
2606#[inline(always)]
2607#[target_feature(enable = "sve,sve2")]
2608#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2609#[cfg_attr(test, assert_instr(bsl1n))]
2610pub fn svbsl1n_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
2611 unsafe { svbsl1n_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
2612}
2613#[doc = "Bitwise select with first input inverted"]
2614#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u8])"]
2615#[inline(always)]
2616#[target_feature(enable = "sve,sve2")]
2617#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2618#[cfg_attr(test, assert_instr(bsl1n))]
2619pub fn svbsl1n_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
2620 svbsl1n_u8(op1, op2, svdup_n_u8(op3))
2621}
2622#[doc = "Bitwise select with first input inverted"]
2623#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u16])"]
2624#[inline(always)]
2625#[target_feature(enable = "sve,sve2")]
2626#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2627#[cfg_attr(test, assert_instr(bsl1n))]
2628pub fn svbsl1n_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
2629 unsafe { svbsl1n_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
2630}
2631#[doc = "Bitwise select with first input inverted"]
2632#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u16])"]
2633#[inline(always)]
2634#[target_feature(enable = "sve,sve2")]
2635#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2636#[cfg_attr(test, assert_instr(bsl1n))]
2637pub fn svbsl1n_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
2638 svbsl1n_u16(op1, op2, svdup_n_u16(op3))
2639}
2640#[doc = "Bitwise select with first input inverted"]
2641#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u32])"]
2642#[inline(always)]
2643#[target_feature(enable = "sve,sve2")]
2644#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2645#[cfg_attr(test, assert_instr(bsl1n))]
2646pub fn svbsl1n_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
2647 unsafe { svbsl1n_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
2648}
2649#[doc = "Bitwise select with first input inverted"]
2650#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u32])"]
2651#[inline(always)]
2652#[target_feature(enable = "sve,sve2")]
2653#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2654#[cfg_attr(test, assert_instr(bsl1n))]
2655pub fn svbsl1n_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
2656 svbsl1n_u32(op1, op2, svdup_n_u32(op3))
2657}
2658#[doc = "Bitwise select with first input inverted"]
2659#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u64])"]
2660#[inline(always)]
2661#[target_feature(enable = "sve,sve2")]
2662#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2663#[cfg_attr(test, assert_instr(bsl1n))]
2664pub fn svbsl1n_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
2665 unsafe { svbsl1n_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
2666}
2667#[doc = "Bitwise select with first input inverted"]
2668#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u64])"]
2669#[inline(always)]
2670#[target_feature(enable = "sve,sve2")]
2671#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2672#[cfg_attr(test, assert_instr(bsl1n))]
2673pub fn svbsl1n_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
2674 svbsl1n_u64(op1, op2, svdup_n_u64(op3))
2675}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv16i8")]
        fn _svbsl2n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    // SAFETY: sve+sve2 are enabled via #[target_feature].
    unsafe { _svbsl2n_s8(op1, op2, op3) }
}
2689#[doc = "Bitwise select with second input inverted"]
2690#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s8])"]
2691#[inline(always)]
2692#[target_feature(enable = "sve,sve2")]
2693#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2694#[cfg_attr(test, assert_instr(bsl2n))]
2695pub fn svbsl2n_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
2696 svbsl2n_s8(op1, op2, svdup_n_s8(op3))
2697}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv8i16")]
        fn _svbsl2n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    // SAFETY: sve+sve2 are enabled via #[target_feature].
    unsafe { _svbsl2n_s16(op1, op2, op3) }
}
2711#[doc = "Bitwise select with second input inverted"]
2712#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s16])"]
2713#[inline(always)]
2714#[target_feature(enable = "sve,sve2")]
2715#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2716#[cfg_attr(test, assert_instr(bsl2n))]
2717pub fn svbsl2n_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
2718 svbsl2n_s16(op1, op2, svdup_n_s16(op3))
2719}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv4i32")]
        fn _svbsl2n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: sve+sve2 are enabled via #[target_feature].
    unsafe { _svbsl2n_s32(op1, op2, op3) }
}
2733#[doc = "Bitwise select with second input inverted"]
2734#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s32])"]
2735#[inline(always)]
2736#[target_feature(enable = "sve,sve2")]
2737#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2738#[cfg_attr(test, assert_instr(bsl2n))]
2739pub fn svbsl2n_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
2740 svbsl2n_s32(op1, op2, svdup_n_s32(op3))
2741}
#[doc = "Bitwise select with second input inverted"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl2n))]
pub fn svbsl2n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv2i64")]
        fn _svbsl2n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: sve+sve2 are enabled via #[target_feature].
    unsafe { _svbsl2n_s64(op1, op2, op3) }
}
2755#[doc = "Bitwise select with second input inverted"]
2756#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s64])"]
2757#[inline(always)]
2758#[target_feature(enable = "sve,sve2")]
2759#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2760#[cfg_attr(test, assert_instr(bsl2n))]
2761pub fn svbsl2n_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
2762 svbsl2n_s64(op1, op2, svdup_n_s64(op3))
2763}
2764#[doc = "Bitwise select with second input inverted"]
2765#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u8])"]
2766#[inline(always)]
2767#[target_feature(enable = "sve,sve2")]
2768#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2769#[cfg_attr(test, assert_instr(bsl2n))]
2770pub fn svbsl2n_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
2771 unsafe { svbsl2n_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
2772}
2773#[doc = "Bitwise select with second input inverted"]
2774#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u8])"]
2775#[inline(always)]
2776#[target_feature(enable = "sve,sve2")]
2777#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2778#[cfg_attr(test, assert_instr(bsl2n))]
2779pub fn svbsl2n_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
2780 svbsl2n_u8(op1, op2, svdup_n_u8(op3))
2781}
2782#[doc = "Bitwise select with second input inverted"]
2783#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u16])"]
2784#[inline(always)]
2785#[target_feature(enable = "sve,sve2")]
2786#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2787#[cfg_attr(test, assert_instr(bsl2n))]
2788pub fn svbsl2n_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
2789 unsafe { svbsl2n_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
2790}
2791#[doc = "Bitwise select with second input inverted"]
2792#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u16])"]
2793#[inline(always)]
2794#[target_feature(enable = "sve,sve2")]
2795#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2796#[cfg_attr(test, assert_instr(bsl2n))]
2797pub fn svbsl2n_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
2798 svbsl2n_u16(op1, op2, svdup_n_u16(op3))
2799}
2800#[doc = "Bitwise select with second input inverted"]
2801#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u32])"]
2802#[inline(always)]
2803#[target_feature(enable = "sve,sve2")]
2804#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2805#[cfg_attr(test, assert_instr(bsl2n))]
2806pub fn svbsl2n_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
2807 unsafe { svbsl2n_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
2808}
2809#[doc = "Bitwise select with second input inverted"]
2810#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u32])"]
2811#[inline(always)]
2812#[target_feature(enable = "sve,sve2")]
2813#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2814#[cfg_attr(test, assert_instr(bsl2n))]
2815pub fn svbsl2n_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
2816 svbsl2n_u32(op1, op2, svdup_n_u32(op3))
2817}
2818#[doc = "Bitwise select with second input inverted"]
2819#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u64])"]
2820#[inline(always)]
2821#[target_feature(enable = "sve,sve2")]
2822#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2823#[cfg_attr(test, assert_instr(bsl2n))]
2824pub fn svbsl2n_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
2825 unsafe { svbsl2n_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
2826}
2827#[doc = "Bitwise select with second input inverted"]
2828#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u64])"]
2829#[inline(always)]
2830#[target_feature(enable = "sve,sve2")]
2831#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2832#[cfg_attr(test, assert_instr(bsl2n))]
2833pub fn svbsl2n_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
2834 svbsl2n_u64(op1, op2, svdup_n_u64(op3))
2835}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
pub fn svbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv16i8")]
        fn _svbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    // SAFETY: sve+sve2 are enabled via #[target_feature].
    unsafe { _svbsl_s8(op1, op2, op3) }
}
2849#[doc = "Bitwise select"]
2850#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s8])"]
2851#[inline(always)]
2852#[target_feature(enable = "sve,sve2")]
2853#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2854#[cfg_attr(test, assert_instr(bsl))]
2855pub fn svbsl_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
2856 svbsl_s8(op1, op2, svdup_n_s8(op3))
2857}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
pub fn svbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv8i16")]
        fn _svbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    // SAFETY: sve+sve2 are enabled via #[target_feature].
    unsafe { _svbsl_s16(op1, op2, op3) }
}
2871#[doc = "Bitwise select"]
2872#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s16])"]
2873#[inline(always)]
2874#[target_feature(enable = "sve,sve2")]
2875#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2876#[cfg_attr(test, assert_instr(bsl))]
2877pub fn svbsl_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
2878 svbsl_s16(op1, op2, svdup_n_s16(op3))
2879}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
pub fn svbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv4i32")]
        fn _svbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: sve+sve2 are enabled via #[target_feature].
    unsafe { _svbsl_s32(op1, op2, op3) }
}
2893#[doc = "Bitwise select"]
2894#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s32])"]
2895#[inline(always)]
2896#[target_feature(enable = "sve,sve2")]
2897#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2898#[cfg_attr(test, assert_instr(bsl))]
2899pub fn svbsl_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
2900 svbsl_s32(op1, op2, svdup_n_s32(op3))
2901}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bsl))]
pub fn svbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv2i64")]
        fn _svbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: sve+sve2 are enabled via #[target_feature].
    unsafe { _svbsl_s64(op1, op2, op3) }
}
2915#[doc = "Bitwise select"]
2916#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s64])"]
2917#[inline(always)]
2918#[target_feature(enable = "sve,sve2")]
2919#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2920#[cfg_attr(test, assert_instr(bsl))]
2921pub fn svbsl_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
2922 svbsl_s64(op1, op2, svdup_n_s64(op3))
2923}
2924#[doc = "Bitwise select"]
2925#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u8])"]
2926#[inline(always)]
2927#[target_feature(enable = "sve,sve2")]
2928#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2929#[cfg_attr(test, assert_instr(bsl))]
2930pub fn svbsl_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
2931 unsafe { svbsl_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
2932}
2933#[doc = "Bitwise select"]
2934#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u8])"]
2935#[inline(always)]
2936#[target_feature(enable = "sve,sve2")]
2937#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2938#[cfg_attr(test, assert_instr(bsl))]
2939pub fn svbsl_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
2940 svbsl_u8(op1, op2, svdup_n_u8(op3))
2941}
2942#[doc = "Bitwise select"]
2943#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u16])"]
2944#[inline(always)]
2945#[target_feature(enable = "sve,sve2")]
2946#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2947#[cfg_attr(test, assert_instr(bsl))]
2948pub fn svbsl_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
2949 unsafe { svbsl_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
2950}
2951#[doc = "Bitwise select"]
2952#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u16])"]
2953#[inline(always)]
2954#[target_feature(enable = "sve,sve2")]
2955#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2956#[cfg_attr(test, assert_instr(bsl))]
2957pub fn svbsl_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
2958 svbsl_u16(op1, op2, svdup_n_u16(op3))
2959}
2960#[doc = "Bitwise select"]
2961#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u32])"]
2962#[inline(always)]
2963#[target_feature(enable = "sve,sve2")]
2964#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2965#[cfg_attr(test, assert_instr(bsl))]
2966pub fn svbsl_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
2967 unsafe { svbsl_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
2968}
2969#[doc = "Bitwise select"]
2970#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u32])"]
2971#[inline(always)]
2972#[target_feature(enable = "sve,sve2")]
2973#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2974#[cfg_attr(test, assert_instr(bsl))]
2975pub fn svbsl_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
2976 svbsl_u32(op1, op2, svdup_n_u32(op3))
2977}
2978#[doc = "Bitwise select"]
2979#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u64])"]
2980#[inline(always)]
2981#[target_feature(enable = "sve,sve2")]
2982#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2983#[cfg_attr(test, assert_instr(bsl))]
2984pub fn svbsl_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
2985 unsafe { svbsl_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
2986}
2987#[doc = "Bitwise select"]
2988#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u64])"]
2989#[inline(always)]
2990#[target_feature(enable = "sve,sve2")]
2991#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2992#[cfg_attr(test, assert_instr(bsl))]
2993pub fn svbsl_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
2994 svbsl_u64(op1, op2, svdup_n_u64(op3))
2995}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_s8<const IMM_ROTATION: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Rotation is restricted to 90 or 270 degrees; enforced at compile time.
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv16i8")]
        fn _svcadd_s8(op1: svint8_t, op2: svint8_t, imm_rotation: i32) -> svint8_t;
    }
    // SAFETY: sve+sve2 are enabled via #[target_feature], and the rotation
    // immediate was validated above.
    unsafe { _svcadd_s8(op1, op2, IMM_ROTATION) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_s16<const IMM_ROTATION: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Rotation is restricted to 90 or 270 degrees; enforced at compile time.
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv8i16")]
        fn _svcadd_s16(op1: svint16_t, op2: svint16_t, imm_rotation: i32) -> svint16_t;
    }
    // SAFETY: sve+sve2 are enabled via #[target_feature], and the rotation
    // immediate was validated above.
    unsafe { _svcadd_s16(op1, op2, IMM_ROTATION) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_s32<const IMM_ROTATION: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Rotation is restricted to 90 or 270 degrees; enforced at compile time.
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv4i32")]
        fn _svcadd_s32(op1: svint32_t, op2: svint32_t, imm_rotation: i32) -> svint32_t;
    }
    // SAFETY: sve+sve2 are enabled via #[target_feature], and the rotation
    // immediate was validated above.
    unsafe { _svcadd_s32(op1, op2, IMM_ROTATION) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_s64<const IMM_ROTATION: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Rotation is restricted to 90 or 270 degrees; enforced at compile time.
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    // FFI binding to the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv2i64")]
        fn _svcadd_s64(op1: svint64_t, op2: svint64_t, imm_rotation: i32) -> svint64_t;
    }
    // SAFETY: sve+sve2 are enabled via #[target_feature], and the rotation
    // immediate was validated above.
    unsafe { _svcadd_s64(op1, op2, IMM_ROTATION) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_u8<const IMM_ROTATION: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    // SAFETY: `as_signed`/`as_unsigned` only reinterpret same-width lanes; both
    // element types lower to the same `cadd` instruction (see `assert_instr`).
    unsafe { svcadd_s8::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_u16<const IMM_ROTATION: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    // SAFETY: lane reinterpretation only; the instruction is the same for
    // signed and unsigned elements.
    unsafe { svcadd_s16::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_u32<const IMM_ROTATION: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    // SAFETY: lane reinterpretation only; the instruction is the same for
    // signed and unsigned elements.
    unsafe { svcadd_s32::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
pub fn svcadd_u64<const IMM_ROTATION: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    // SAFETY: lane reinterpretation only; the instruction is the same for
    // signed and unsigned elements.
    unsafe { svcadd_s64::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Complex dot product"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cdot, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svcdot_lane_s32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svint32_t,
    op2: svint8_t,
    op3: svint8_t,
) -> svint32_t {
    // Lane index is restricted to 0..=3 for the 32-bit accumulator form.
    static_assert_range!(IMM_INDEX, 0..=3);
    // CDOT encodes rotations of 0, 90, 180 or 270 degrees only.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cdot.lane.nxv4i32"
        )]
        fn _svcdot_lane_s32(
            op1: svint32_t,
            op2: svint8_t,
            op3: svint8_t,
            imm_index: i32,
            imm_rotation: i32,
        ) -> svint32_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`, and both immediates were
    // validated by the static assertions above.
    unsafe { _svcdot_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
}
#[doc = "Complex dot product"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cdot, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svcdot_lane_s64<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svint64_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint64_t {
    // Lane index is restricted to 0..=1 for the 64-bit accumulator form.
    static_assert_range!(IMM_INDEX, 0..=1);
    // CDOT encodes rotations of 0, 90, 180 or 270 degrees only.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cdot.lane.nxv2i64"
        )]
        fn _svcdot_lane_s64(
            op1: svint64_t,
            op2: svint16_t,
            op3: svint16_t,
            imm_index: i32,
            imm_rotation: i32,
        ) -> svint64_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`, and both immediates were
    // validated by the static assertions above.
    unsafe { _svcdot_lane_s64(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
}
#[doc = "Complex dot product"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cdot, IMM_ROTATION = 90))]
pub fn svcdot_s32<const IMM_ROTATION: i32>(
    op1: svint32_t,
    op2: svint8_t,
    op3: svint8_t,
) -> svint32_t {
    // CDOT encodes rotations of 0, 90, 180 or 270 degrees only.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cdot.nxv4i32")]
        fn _svcdot_s32(
            op1: svint32_t,
            op2: svint8_t,
            op3: svint8_t,
            imm_rotation: i32,
        ) -> svint32_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`, and the rotation immediate
    // was validated above.
    unsafe { _svcdot_s32(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Complex dot product"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cdot, IMM_ROTATION = 90))]
pub fn svcdot_s64<const IMM_ROTATION: i32>(
    op1: svint64_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint64_t {
    // CDOT encodes rotations of 0, 90, 180 or 270 degrees only.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cdot.nxv2i64")]
        fn _svcdot_s64(
            op1: svint64_t,
            op2: svint16_t,
            op3: svint16_t,
            imm_rotation: i32,
        ) -> svint64_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`, and the rotation immediate
    // was validated above.
    unsafe { _svcdot_s64(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svcmla_lane_s16<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svint16_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint16_t {
    // Lane index is restricted to 0..=3 for 16-bit elements.
    static_assert_range!(IMM_INDEX, 0..=3);
    // CMLA encodes rotations of 0, 90, 180 or 270 degrees only.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmla.lane.x.nxv8i16"
        )]
        fn _svcmla_lane_s16(
            op1: svint16_t,
            op2: svint16_t,
            op3: svint16_t,
            imm_index: i32,
            imm_rotation: i32,
        ) -> svint16_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`, and both immediates were
    // validated by the static assertions above.
    unsafe { _svcmla_lane_s16(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svcmla_lane_s32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svint32_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint32_t {
    // Lane index is restricted to 0..=1 for 32-bit elements.
    static_assert_range!(IMM_INDEX, 0..=1);
    // CMLA encodes rotations of 0, 90, 180 or 270 degrees only.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmla.lane.x.nxv4i32"
        )]
        fn _svcmla_lane_s32(
            op1: svint32_t,
            op2: svint32_t,
            op3: svint32_t,
            imm_index: i32,
            imm_rotation: i32,
        ) -> svint32_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`, and both immediates were
    // validated by the static assertions above.
    unsafe { _svcmla_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svcmla_lane_u16<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svuint16_t,
    op2: svuint16_t,
    op3: svuint16_t,
) -> svuint16_t {
    // Same immediate constraints as the signed variant this forwards to.
    static_assert_range!(IMM_INDEX, 0..=3);
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    // SAFETY: `as_signed`/`as_unsigned` only reinterpret same-width lanes; both
    // element types lower to the same `cmla` instruction (see `assert_instr`).
    unsafe {
        svcmla_lane_s16::<IMM_INDEX, IMM_ROTATION>(
            op1.as_signed(),
            op2.as_signed(),
            op3.as_signed(),
        )
        .as_unsigned()
    }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svcmla_lane_u32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svuint32_t,
    op2: svuint32_t,
    op3: svuint32_t,
) -> svuint32_t {
    // Same immediate constraints as the signed variant this forwards to.
    static_assert_range!(IMM_INDEX, 0..=1);
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    // SAFETY: lane reinterpretation only; the instruction is the same for
    // signed and unsigned elements.
    unsafe {
        svcmla_lane_s32::<IMM_INDEX, IMM_ROTATION>(
            op1.as_signed(),
            op2.as_signed(),
            op3.as_signed(),
        )
        .as_unsigned()
    }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
pub fn svcmla_s8<const IMM_ROTATION: i32>(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    // CMLA encodes rotations of 0, 90, 180 or 270 degrees only.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv16i8")]
        fn _svcmla_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t, imm_rotation: i32) -> svint8_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`, and the rotation immediate
    // was validated above.
    unsafe { _svcmla_s8(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
pub fn svcmla_s16<const IMM_ROTATION: i32>(
    op1: svint16_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint16_t {
    // CMLA encodes rotations of 0, 90, 180 or 270 degrees only.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv8i16")]
        fn _svcmla_s16(
            op1: svint16_t,
            op2: svint16_t,
            op3: svint16_t,
            imm_rotation: i32,
        ) -> svint16_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`, and the rotation immediate
    // was validated above.
    unsafe { _svcmla_s16(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
pub fn svcmla_s32<const IMM_ROTATION: i32>(
    op1: svint32_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint32_t {
    // CMLA encodes rotations of 0, 90, 180 or 270 degrees only.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv4i32")]
        fn _svcmla_s32(
            op1: svint32_t,
            op2: svint32_t,
            op3: svint32_t,
            imm_rotation: i32,
        ) -> svint32_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`, and the rotation immediate
    // was validated above.
    unsafe { _svcmla_s32(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
pub fn svcmla_s64<const IMM_ROTATION: i32>(
    op1: svint64_t,
    op2: svint64_t,
    op3: svint64_t,
) -> svint64_t {
    // CMLA encodes rotations of 0, 90, 180 or 270 degrees only.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv2i64")]
        fn _svcmla_s64(
            op1: svint64_t,
            op2: svint64_t,
            op3: svint64_t,
            imm_rotation: i32,
        ) -> svint64_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`, and the rotation immediate
    // was validated above.
    unsafe { _svcmla_s64(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
pub fn svcmla_u8<const IMM_ROTATION: i32>(
    op1: svuint8_t,
    op2: svuint8_t,
    op3: svuint8_t,
) -> svuint8_t {
    // Same rotation constraint as the signed variant this forwards to.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    // SAFETY: `as_signed`/`as_unsigned` only reinterpret same-width lanes; both
    // element types lower to the same `cmla` instruction (see `assert_instr`).
    unsafe {
        svcmla_s8::<IMM_ROTATION>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
    }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
pub fn svcmla_u16<const IMM_ROTATION: i32>(
    op1: svuint16_t,
    op2: svuint16_t,
    op3: svuint16_t,
) -> svuint16_t {
    // Same rotation constraint as the signed variant this forwards to.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    // SAFETY: lane reinterpretation only; the instruction is the same for
    // signed and unsigned elements.
    unsafe {
        svcmla_s16::<IMM_ROTATION>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
    }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
pub fn svcmla_u32<const IMM_ROTATION: i32>(
    op1: svuint32_t,
    op2: svuint32_t,
    op3: svuint32_t,
) -> svuint32_t {
    // Same rotation constraint as the signed variant this forwards to.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    // SAFETY: lane reinterpretation only; the instruction is the same for
    // signed and unsigned elements.
    unsafe {
        svcmla_s32::<IMM_ROTATION>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
    }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
pub fn svcmla_u64<const IMM_ROTATION: i32>(
    op1: svuint64_t,
    op2: svuint64_t,
    op3: svuint64_t,
) -> svuint64_t {
    // Same rotation constraint as the signed variant this forwards to.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    // SAFETY: lane reinterpretation only; the instruction is the same for
    // signed and unsigned elements.
    unsafe {
        svcmla_s64::<IMM_ROTATION>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
    }
}
#[doc = "Up convert long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtlt_f64[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtlt))]
pub fn svcvtlt_f64_f32_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat32_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtlt.f64f32")]
        fn _svcvtlt_f64_f32_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat32_t)
            -> svfloat64_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`. The LLVM intrinsic takes
    // the predicate as `svbool2_t`; `sve_into` performs that conversion
    // (NOTE(review): presumably a predicate-width adjustment — confirm).
    unsafe { _svcvtlt_f64_f32_m(inactive, pg.sve_into(), op) }
}
3487#[doc = "Up convert long (top)"]
3488#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtlt_f64[_f32]_x)"]
3489#[inline(always)]
3490#[target_feature(enable = "sve,sve2")]
3491#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
3492#[cfg_attr(test, assert_instr(fcvtlt))]
3493pub fn svcvtlt_f64_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat64_t {
3494 unsafe { svcvtlt_f64_f32_m(crate::intrinsics::transmute_unchecked(op), pg, op) }
3495}
#[doc = "Down convert and narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtnt_f32[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtnt))]
pub fn svcvtnt_f32_f64_m(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtnt.f32f64")]
        fn _svcvtnt_f32_f64_m(even: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`; `sve_into` converts the
    // predicate to the `svbool2_t` form the LLVM intrinsic expects.
    unsafe { _svcvtnt_f32_f64_m(even, pg.sve_into(), op) }
}
#[doc = "Down convert and narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtnt_f32[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtnt))]
pub fn svcvtnt_f32_f64_x(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    // The `_x` form is implemented by deferring to the merging form.
    svcvtnt_f32_f64_m(even, pg, op)
}
#[doc = "Down convert, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtx))]
pub fn svcvtx_f32_f64_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtx.f32f64")]
        fn _svcvtx_f32_f64_m(inactive: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`; `sve_into` converts the
    // predicate to the `svbool2_t` form the LLVM intrinsic expects.
    unsafe { _svcvtx_f32_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Down convert, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtx))]
pub fn svcvtx_f32_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    // SAFETY: both types are SVE vector registers of the same size; per the
    // ACLE, `_x` leaves inactive lanes unspecified, so reinterpreting `op` as
    // the merge source is acceptable.
    unsafe { svcvtx_f32_f64_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Down convert, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtx))]
pub fn svcvtx_f32_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    // Zeroing form: merge into an all-zero vector.
    svcvtx_f32_f64_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Down convert, rounding to odd (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtxnt_f32[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtxnt))]
pub fn svcvtxnt_f32_f64_m(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtxnt.f32f64")]
        fn _svcvtxnt_f32_f64_m(even: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`; `sve_into` converts the
    // predicate to the `svbool2_t` form the LLVM intrinsic expects.
    unsafe { _svcvtxnt_f32_f64_m(even, pg.sve_into(), op) }
}
#[doc = "Down convert, rounding to odd (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtxnt_f32[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtxnt))]
pub fn svcvtxnt_f32_f64_x(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    // The `_x` form is implemented by deferring to the merging form.
    svcvtxnt_f32_f64_m(even, pg, op)
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv16i8")]
        fn _sveor3_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`.
    unsafe { _sveor3_s8(op1, op2, op3) }
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    // Splat the scalar operand and defer to the vector form.
    sveor3_s8(op1, op2, svdup_n_s8(op3))
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv8i16")]
        fn _sveor3_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`.
    unsafe { _sveor3_s16(op1, op2, op3) }
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    // Splat the scalar operand and defer to the vector form.
    sveor3_s16(op1, op2, svdup_n_s16(op3))
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv4i32")]
        fn _sveor3_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`.
    unsafe { _sveor3_s32(op1, op2, op3) }
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    // Splat the scalar operand and defer to the vector form.
    sveor3_s32(op1, op2, svdup_n_s32(op3))
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv2i64")]
        fn _sveor3_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`.
    unsafe { _sveor3_s64(op1, op2, op3) }
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    // Splat the scalar operand and defer to the vector form.
    sveor3_s64(op1, op2, svdup_n_s64(op3))
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    // SAFETY: `as_signed`/`as_unsigned` only reinterpret same-width lanes; both
    // element types lower to the same `eor3` instruction (see `assert_instr`).
    unsafe { sveor3_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    // Splat the scalar operand and defer to the vector form.
    sveor3_u8(op1, op2, svdup_n_u8(op3))
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    // SAFETY: lane reinterpretation only; `eor3` is bit-pattern based.
    unsafe { sveor3_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    // Splat the scalar operand and defer to the vector form.
    sveor3_u16(op1, op2, svdup_n_u16(op3))
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    // SAFETY: lane reinterpretation only; `eor3` is bit-pattern based.
    unsafe { sveor3_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    // Splat the scalar operand and defer to the vector form.
    sveor3_u32(op1, op2, svdup_n_u32(op3))
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    // SAFETY: lane reinterpretation only; `eor3` is bit-pattern based.
    unsafe { sveor3_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn sveor3_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    // Splat the scalar operand and defer to the vector form.
    sveor3_u64(op1, op2, svdup_n_u64(op3))
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_s8(odd: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv16i8")]
        fn _sveorbt_s8(odd: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`.
    unsafe { _sveorbt_s8(odd, op1, op2) }
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_n_s8(odd: svint8_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Splat the scalar operand and defer to the vector form.
    sveorbt_s8(odd, op1, svdup_n_s8(op2))
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_s16(odd: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv8i16")]
        fn _sveorbt_s16(odd: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`.
    unsafe { _sveorbt_s16(odd, op1, op2) }
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_n_s16(odd: svint16_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Splat the scalar operand and defer to the vector form.
    sveorbt_s16(odd, op1, svdup_n_s16(op2))
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_s32(odd: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv4i32")]
        fn _sveorbt_s32(odd: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: sve2 is enabled via `target_feature`.
    unsafe { _sveorbt_s32(odd, op1, op2) }
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_n_s32(odd: svint32_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Splat the scalar operand and defer to the vector form.
    sveorbt_s32(odd, op1, svdup_n_s32(op2))
}
3797#[doc = "Interleaving exclusive OR (bottom, top)"]
3798#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s64])"]
3799#[inline(always)]
3800#[target_feature(enable = "sve,sve2")]
3801#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
3802#[cfg_attr(test, assert_instr(eorbt))]
3803pub fn sveorbt_s64(odd: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
3804 unsafe extern "unadjusted" {
3805 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv2i64")]
3806 fn _sveorbt_s64(odd: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
3807 }
3808 unsafe { _sveorbt_s64(odd, op1, op2) }
3809}
3810#[doc = "Interleaving exclusive OR (bottom, top)"]
3811#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s64])"]
3812#[inline(always)]
3813#[target_feature(enable = "sve,sve2")]
3814#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
3815#[cfg_attr(test, assert_instr(eorbt))]
3816pub fn sveorbt_n_s64(odd: svint64_t, op1: svint64_t, op2: i64) -> svint64_t {
3817 sveorbt_s64(odd, op1, svdup_n_s64(op2))
3818}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_u8(odd: svuint8_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // SAFETY: `as_signed`/`as_unsigned` reinterpret lanes between equal-width
    // types; exclusive OR is a bitwise operation, so the result is unchanged.
    unsafe { sveorbt_s8(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_n_u8(odd: svuint8_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Broadcast the scalar operand and defer to the vector form.
    sveorbt_u8(odd, op1, svdup_n_u8(op2))
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_u16(odd: svuint16_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // SAFETY: `as_signed`/`as_unsigned` reinterpret lanes between equal-width
    // types; exclusive OR is a bitwise operation, so the result is unchanged.
    unsafe { sveorbt_s16(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_n_u16(odd: svuint16_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Broadcast the scalar operand and defer to the vector form.
    sveorbt_u16(odd, op1, svdup_n_u16(op2))
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_u32(odd: svuint32_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // SAFETY: `as_signed`/`as_unsigned` reinterpret lanes between equal-width
    // types; exclusive OR is a bitwise operation, so the result is unchanged.
    unsafe { sveorbt_s32(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_n_u32(odd: svuint32_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Broadcast the scalar operand and defer to the vector form.
    sveorbt_u32(odd, op1, svdup_n_u32(op2))
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_u64(odd: svuint64_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // SAFETY: `as_signed`/`as_unsigned` reinterpret lanes between equal-width
    // types; exclusive OR is a bitwise operation, so the result is unchanged.
    unsafe { sveorbt_s64(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleaving exclusive OR (bottom, top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorbt))]
pub fn sveorbt_n_u64(odd: svuint64_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Broadcast the scalar operand and defer to the vector form.
    sveorbt_u64(odd, op1, svdup_n_u64(op2))
}
#[doc = "Interleaving exclusive OR (top, bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eortb))]
pub fn sveortb_s8(even: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv16i8")]
        fn _sveortb_s8(even: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: `sve,sve2` are enabled via `#[target_feature]`, and the call
    // matches the intrinsic declaration directly above.
    unsafe { _sveortb_s8(even, op1, op2) }
}
#[doc = "Interleaving exclusive OR (top, bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eortb))]
pub fn sveortb_n_s8(even: svint8_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Broadcast the scalar operand and defer to the vector form.
    sveortb_s8(even, op1, svdup_n_s8(op2))
}
#[doc = "Interleaving exclusive OR (top, bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eortb))]
pub fn sveortb_s16(even: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv8i16")]
        fn _sveortb_s16(even: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: `sve,sve2` are enabled via `#[target_feature]`, and the call
    // matches the intrinsic declaration directly above.
    unsafe { _sveortb_s16(even, op1, op2) }
}
#[doc = "Interleaving exclusive OR (top, bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eortb))]
pub fn sveortb_n_s16(even: svint16_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Broadcast the scalar operand and defer to the vector form.
    sveortb_s16(even, op1, svdup_n_s16(op2))
}
#[doc = "Interleaving exclusive OR (top, bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eortb))]
pub fn sveortb_s32(even: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv4i32")]
        fn _sveortb_s32(even: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: `sve,sve2` are enabled via `#[target_feature]`, and the call
    // matches the intrinsic declaration directly above.
    unsafe { _sveortb_s32(even, op1, op2) }
}
#[doc = "Interleaving exclusive OR (top, bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eortb))]
pub fn sveortb_n_s32(even: svint32_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Broadcast the scalar operand and defer to the vector form.
    sveortb_s32(even, op1, svdup_n_s32(op2))
}
#[doc = "Interleaving exclusive OR (top, bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eortb))]
pub fn sveortb_s64(even: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv2i64")]
        fn _sveortb_s64(even: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: `sve,sve2` are enabled via `#[target_feature]`, and the call
    // matches the intrinsic declaration directly above.
    unsafe { _sveortb_s64(even, op1, op2) }
}
#[doc = "Interleaving exclusive OR (top, bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eortb))]
pub fn sveortb_n_s64(even: svint64_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Broadcast the scalar operand and defer to the vector form.
    sveortb_s64(even, op1, svdup_n_s64(op2))
}
#[doc = "Interleaving exclusive OR (top, bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eortb))]
pub fn sveortb_u8(even: svuint8_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // SAFETY: `as_signed`/`as_unsigned` reinterpret lanes between equal-width
    // types; exclusive OR is a bitwise operation, so the result is unchanged.
    unsafe { sveortb_s8(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleaving exclusive OR (top, bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eortb))]
pub fn sveortb_n_u8(even: svuint8_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Broadcast the scalar operand and defer to the vector form.
    sveortb_u8(even, op1, svdup_n_u8(op2))
}
#[doc = "Interleaving exclusive OR (top, bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eortb))]
pub fn sveortb_u16(even: svuint16_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // SAFETY: `as_signed`/`as_unsigned` reinterpret lanes between equal-width
    // types; exclusive OR is a bitwise operation, so the result is unchanged.
    unsafe { sveortb_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleaving exclusive OR (top, bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eortb))]
pub fn sveortb_n_u16(even: svuint16_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Broadcast the scalar operand and defer to the vector form.
    sveortb_u16(even, op1, svdup_n_u16(op2))
}
#[doc = "Interleaving exclusive OR (top, bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eortb))]
pub fn sveortb_u32(even: svuint32_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // SAFETY: `as_signed`/`as_unsigned` reinterpret lanes between equal-width
    // types; exclusive OR is a bitwise operation, so the result is unchanged.
    unsafe { sveortb_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleaving exclusive OR (top, bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eortb))]
pub fn sveortb_n_u32(even: svuint32_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Broadcast the scalar operand and defer to the vector form.
    sveortb_u32(even, op1, svdup_n_u32(op2))
}
#[doc = "Interleaving exclusive OR (top, bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eortb))]
pub fn sveortb_u64(even: svuint64_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // SAFETY: `as_signed`/`as_unsigned` reinterpret lanes between equal-width
    // types; exclusive OR is a bitwise operation, so the result is unchanged.
    unsafe { sveortb_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleaving exclusive OR (top, bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eortb))]
pub fn sveortb_n_u64(even: svuint64_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Broadcast the scalar operand and defer to the vector form.
    sveortb_u64(even, op1, svdup_n_u64(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv16i8")]
        fn _svhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: `sve,sve2` are enabled via `#[target_feature]`, and the call
    // matches the intrinsic declaration directly above.
    unsafe { _svhadd_s8_m(pg, op1, op2) }
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // The _x form leaves inactive lanes unspecified (per ACLE), so the
    // merging form is a valid implementation.
    svhadd_s8_m(pg, op1, op2)
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Zeroing predication: zero the inactive lanes of `op1` first, then
    // apply the merging form so those lanes come out as zero.
    svhadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv8i16")]
        fn _svhadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: `sve,sve2` are enabled via `#[target_feature]`; `sve_into`
    // converts `pg` to the `svbool8_t` predicate the intrinsic expects.
    unsafe { _svhadd_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // The _x form leaves inactive lanes unspecified (per ACLE), so the
    // merging form is a valid implementation.
    svhadd_s16_m(pg, op1, op2)
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Zeroing predication: zero the inactive lanes of `op1` first, then
    // apply the merging form so those lanes come out as zero.
    svhadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv4i32")]
        fn _svhadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: `sve,sve2` are enabled via `#[target_feature]`; `sve_into`
    // converts `pg` to the `svbool4_t` predicate the intrinsic expects.
    unsafe { _svhadd_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // The _x form leaves inactive lanes unspecified (per ACLE), so the
    // merging form is a valid implementation.
    svhadd_s32_m(pg, op1, op2)
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Zeroing predication: zero the inactive lanes of `op1` first, then
    // apply the merging form so those lanes come out as zero.
    svhadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv2i64")]
        fn _svhadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: `sve,sve2` are enabled via `#[target_feature]`; `sve_into`
    // converts `pg` to the `svbool2_t` predicate the intrinsic expects.
    unsafe { _svhadd_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // The _x form leaves inactive lanes unspecified (per ACLE), so the
    // merging form is a valid implementation.
    svhadd_s64_m(pg, op1, op2)
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Zeroing predication: zero the inactive lanes of `op1` first, then
    // apply the merging form so those lanes come out as zero.
    svhadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shadd))]
pub fn svhadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhadd))]
pub fn svhadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv16i8")]
        fn _svhadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: `sve,sve2` are enabled via `#[target_feature]`;
    // `as_signed`/`as_unsigned` only reinterpret lane types to match the
    // declared signature — the `uhadd` intrinsic itself is the unsigned form.
    unsafe { _svhadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhadd))]
pub fn svhadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_u8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhadd))]
pub fn svhadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // The _x form leaves inactive lanes unspecified (per ACLE), so the
    // merging form is a valid implementation.
    svhadd_u8_m(pg, op1, op2)
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhadd))]
pub fn svhadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_u8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhadd))]
pub fn svhadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Zeroing predication: zero the inactive lanes of `op1` first, then
    // apply the merging form so those lanes come out as zero.
    svhadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhadd))]
pub fn svhadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_u8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhadd))]
pub fn svhadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv8i16")]
        fn _svhadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: `sve,sve2` are enabled via `#[target_feature]`; `sve_into`
    // converts `pg` to the `svbool8_t` the intrinsic expects, and
    // `as_signed`/`as_unsigned` only reinterpret lane types to match the
    // declared signature — the `uhadd` intrinsic itself is the unsigned form.
    unsafe { _svhadd_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhadd))]
pub fn svhadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhadd))]
pub fn svhadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // The _x form leaves inactive lanes unspecified (per ACLE), so the
    // merging form is a valid implementation.
    svhadd_u16_m(pg, op1, op2)
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhadd))]
pub fn svhadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhadd))]
pub fn svhadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Zeroing predication: zero the inactive lanes of `op1` first, then
    // apply the merging form so those lanes come out as zero.
    svhadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhadd))]
pub fn svhadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Broadcast the scalar operand and defer to the vector form.
    svhadd_u16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhadd))]
pub fn svhadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        // LLVM exposes one `uhadd` intrinsic per element width over plain
        // integer vectors, hence the signed vector types in this binding.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv4i32")]
        fn _svhadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: pure register computation, no memory access; `sve_into` re-casts
    // the predicate to 32-bit lane granularity and `as_signed`/`as_unsigned`
    // are bit-preserving type casts.
    unsafe { _svhadd_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
4412#[doc = "Halving add"]
4413#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_m)"]
4414#[inline(always)]
4415#[target_feature(enable = "sve,sve2")]
4416#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4417#[cfg_attr(test, assert_instr(uhadd))]
4418pub fn svhadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
4419 svhadd_u32_m(pg, op1, svdup_n_u32(op2))
4420}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhadd))]
pub fn svhadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // "Don't care" predication: inactive result lanes may hold any value, so
    // the merging (`_m`) form is reused unchanged.
    svhadd_u32_m(pg, op1, op2)
}
4430#[doc = "Halving add"]
4431#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_x)"]
4432#[inline(always)]
4433#[target_feature(enable = "sve,sve2")]
4434#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4435#[cfg_attr(test, assert_instr(uhadd))]
4436pub fn svhadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
4437 svhadd_u32_x(pg, op1, svdup_n_u32(op2))
4438}
4439#[doc = "Halving add"]
4440#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_z)"]
4441#[inline(always)]
4442#[target_feature(enable = "sve,sve2")]
4443#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4444#[cfg_attr(test, assert_instr(uhadd))]
4445pub fn svhadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
4446 svhadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
4447}
4448#[doc = "Halving add"]
4449#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_z)"]
4450#[inline(always)]
4451#[target_feature(enable = "sve,sve2")]
4452#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4453#[cfg_attr(test, assert_instr(uhadd))]
4454pub fn svhadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
4455 svhadd_u32_z(pg, op1, svdup_n_u32(op2))
4456}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhadd))]
pub fn svhadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        // LLVM exposes one `uhadd` intrinsic per element width over plain
        // integer vectors, hence the signed vector types in this binding.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv2i64")]
        fn _svhadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: pure register computation, no memory access; `sve_into` re-casts
    // the predicate to 64-bit lane granularity and `as_signed`/`as_unsigned`
    // are bit-preserving type casts.
    unsafe { _svhadd_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
4470#[doc = "Halving add"]
4471#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_m)"]
4472#[inline(always)]
4473#[target_feature(enable = "sve,sve2")]
4474#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4475#[cfg_attr(test, assert_instr(uhadd))]
4476pub fn svhadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
4477 svhadd_u64_m(pg, op1, svdup_n_u64(op2))
4478}
#[doc = "Halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhadd))]
pub fn svhadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // "Don't care" predication: inactive result lanes may hold any value, so
    // the merging (`_m`) form is reused unchanged.
    svhadd_u64_m(pg, op1, op2)
}
4488#[doc = "Halving add"]
4489#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_x)"]
4490#[inline(always)]
4491#[target_feature(enable = "sve,sve2")]
4492#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4493#[cfg_attr(test, assert_instr(uhadd))]
4494pub fn svhadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
4495 svhadd_u64_x(pg, op1, svdup_n_u64(op2))
4496}
4497#[doc = "Halving add"]
4498#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_z)"]
4499#[inline(always)]
4500#[target_feature(enable = "sve,sve2")]
4501#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4502#[cfg_attr(test, assert_instr(uhadd))]
4503pub fn svhadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
4504 svhadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
4505}
4506#[doc = "Halving add"]
4507#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_z)"]
4508#[inline(always)]
4509#[target_feature(enable = "sve,sve2")]
4510#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4511#[cfg_attr(test, assert_instr(uhadd))]
4512pub fn svhadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
4513 svhadd_u64_z(pg, op1, svdup_n_u64(op2))
4514}
#[doc = "Count matching elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(histcnt))]
pub fn svhistcnt_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        // LLVM yields the per-lane match counts as an i32 vector; the public
        // API exposes the counts as unsigned.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.histcnt.nxv4i32"
        )]
        fn _svhistcnt_s32_z(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: pure register computation; `sve_into` re-casts the predicate to
    // 32-bit lane granularity and `as_unsigned` is a bit-preserving cast.
    unsafe { _svhistcnt_s32_z(pg.sve_into(), op1, op2).as_unsigned() }
}
#[doc = "Count matching elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(histcnt))]
pub fn svhistcnt_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        // LLVM yields the per-lane match counts as an i64 vector; the public
        // API exposes the counts as unsigned.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.histcnt.nxv2i64"
        )]
        fn _svhistcnt_s64_z(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: pure register computation; `sve_into` re-casts the predicate to
    // 64-bit lane granularity and `as_unsigned` is a bit-preserving cast.
    unsafe { _svhistcnt_s64_z(pg.sve_into(), op1, op2).as_unsigned() }
}
4547#[doc = "Count matching elements"]
4548#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_u32]_z)"]
4549#[inline(always)]
4550#[target_feature(enable = "sve,sve2")]
4551#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4552#[cfg_attr(test, assert_instr(histcnt))]
4553pub fn svhistcnt_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
4554 unsafe { svhistcnt_s32_z(pg, op1.as_signed(), op2.as_signed()) }
4555}
4556#[doc = "Count matching elements"]
4557#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_u64]_z)"]
4558#[inline(always)]
4559#[target_feature(enable = "sve,sve2")]
4560#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4561#[cfg_attr(test, assert_instr(histcnt))]
4562pub fn svhistcnt_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
4563 unsafe { svhistcnt_s64_z(pg, op1.as_signed(), op2.as_signed()) }
4564}
#[doc = "Count matching elements in 128-bit segments"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistseg[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(histseg))]
pub fn svhistseg_s8(op1: svint8_t, op2: svint8_t) -> svuint8_t {
    // Unpredicated operation (no governing-predicate parameter); counts are
    // produced per 128-bit segment and exposed as unsigned.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.histseg.nxv16i8"
        )]
        fn _svhistseg_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: pure register computation; `as_unsigned` is a bit-preserving cast.
    unsafe { _svhistseg_s8(op1, op2).as_unsigned() }
}
4581#[doc = "Count matching elements in 128-bit segments"]
4582#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistseg[_u8])"]
4583#[inline(always)]
4584#[target_feature(enable = "sve,sve2")]
4585#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4586#[cfg_attr(test, assert_instr(histseg))]
4587pub fn svhistseg_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
4588 unsafe { svhistseg_s8(op1.as_signed(), op2.as_signed()) }
4589}
#[doc = "Halving subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shsub))]
pub fn svhsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        // Direct binding to the LLVM SVE2 intrinsic for 8-bit elements.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv16i8")]
        fn _svhsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: pure register computation; the predicate is passed unchanged
    // since `svbool_t` already has byte (8-bit lane) granularity.
    unsafe { _svhsub_s8_m(pg, op1, op2) }
}
4603#[doc = "Halving subtract"]
4604#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_m)"]
4605#[inline(always)]
4606#[target_feature(enable = "sve,sve2")]
4607#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4608#[cfg_attr(test, assert_instr(shsub))]
4609pub fn svhsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
4610 svhsub_s8_m(pg, op1, svdup_n_s8(op2))
4611}
#[doc = "Halving subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shsub))]
pub fn svhsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // "Don't care" predication: inactive result lanes may hold any value, so
    // the merging (`_m`) form is reused unchanged.
    svhsub_s8_m(pg, op1, op2)
}
4621#[doc = "Halving subtract"]
4622#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_x)"]
4623#[inline(always)]
4624#[target_feature(enable = "sve,sve2")]
4625#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4626#[cfg_attr(test, assert_instr(shsub))]
4627pub fn svhsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
4628 svhsub_s8_x(pg, op1, svdup_n_s8(op2))
4629}
4630#[doc = "Halving subtract"]
4631#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_z)"]
4632#[inline(always)]
4633#[target_feature(enable = "sve,sve2")]
4634#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4635#[cfg_attr(test, assert_instr(shsub))]
4636pub fn svhsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
4637 svhsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
4638}
4639#[doc = "Halving subtract"]
4640#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_z)"]
4641#[inline(always)]
4642#[target_feature(enable = "sve,sve2")]
4643#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4644#[cfg_attr(test, assert_instr(shsub))]
4645pub fn svhsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
4646 svhsub_s8_z(pg, op1, svdup_n_s8(op2))
4647}
#[doc = "Halving subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shsub))]
pub fn svhsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        // Direct binding to the LLVM SVE2 intrinsic for 16-bit elements.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv8i16")]
        fn _svhsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: pure register computation; `sve_into` re-casts the predicate to
    // the 16-bit lane granularity the intrinsic expects.
    unsafe { _svhsub_s16_m(pg.sve_into(), op1, op2) }
}
4661#[doc = "Halving subtract"]
4662#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_m)"]
4663#[inline(always)]
4664#[target_feature(enable = "sve,sve2")]
4665#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4666#[cfg_attr(test, assert_instr(shsub))]
4667pub fn svhsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
4668 svhsub_s16_m(pg, op1, svdup_n_s16(op2))
4669}
#[doc = "Halving subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shsub))]
pub fn svhsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // "Don't care" predication: inactive result lanes may hold any value, so
    // the merging (`_m`) form is reused unchanged.
    svhsub_s16_m(pg, op1, op2)
}
4679#[doc = "Halving subtract"]
4680#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_x)"]
4681#[inline(always)]
4682#[target_feature(enable = "sve,sve2")]
4683#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4684#[cfg_attr(test, assert_instr(shsub))]
4685pub fn svhsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
4686 svhsub_s16_x(pg, op1, svdup_n_s16(op2))
4687}
4688#[doc = "Halving subtract"]
4689#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_z)"]
4690#[inline(always)]
4691#[target_feature(enable = "sve,sve2")]
4692#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4693#[cfg_attr(test, assert_instr(shsub))]
4694pub fn svhsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
4695 svhsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
4696}
4697#[doc = "Halving subtract"]
4698#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_z)"]
4699#[inline(always)]
4700#[target_feature(enable = "sve,sve2")]
4701#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4702#[cfg_attr(test, assert_instr(shsub))]
4703pub fn svhsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
4704 svhsub_s16_z(pg, op1, svdup_n_s16(op2))
4705}
#[doc = "Halving subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shsub))]
pub fn svhsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        // Direct binding to the LLVM SVE2 intrinsic for 32-bit elements.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv4i32")]
        fn _svhsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: pure register computation; `sve_into` re-casts the predicate to
    // the 32-bit lane granularity the intrinsic expects.
    unsafe { _svhsub_s32_m(pg.sve_into(), op1, op2) }
}
4719#[doc = "Halving subtract"]
4720#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_m)"]
4721#[inline(always)]
4722#[target_feature(enable = "sve,sve2")]
4723#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4724#[cfg_attr(test, assert_instr(shsub))]
4725pub fn svhsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
4726 svhsub_s32_m(pg, op1, svdup_n_s32(op2))
4727}
#[doc = "Halving subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shsub))]
pub fn svhsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // "Don't care" predication: inactive result lanes may hold any value, so
    // the merging (`_m`) form is reused unchanged.
    svhsub_s32_m(pg, op1, op2)
}
4737#[doc = "Halving subtract"]
4738#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_x)"]
4739#[inline(always)]
4740#[target_feature(enable = "sve,sve2")]
4741#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4742#[cfg_attr(test, assert_instr(shsub))]
4743pub fn svhsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
4744 svhsub_s32_x(pg, op1, svdup_n_s32(op2))
4745}
4746#[doc = "Halving subtract"]
4747#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_z)"]
4748#[inline(always)]
4749#[target_feature(enable = "sve,sve2")]
4750#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4751#[cfg_attr(test, assert_instr(shsub))]
4752pub fn svhsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
4753 svhsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
4754}
4755#[doc = "Halving subtract"]
4756#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_z)"]
4757#[inline(always)]
4758#[target_feature(enable = "sve,sve2")]
4759#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4760#[cfg_attr(test, assert_instr(shsub))]
4761pub fn svhsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
4762 svhsub_s32_z(pg, op1, svdup_n_s32(op2))
4763}
#[doc = "Halving subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shsub))]
pub fn svhsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        // Direct binding to the LLVM SVE2 intrinsic for 64-bit elements.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv2i64")]
        fn _svhsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: pure register computation; `sve_into` re-casts the predicate to
    // the 64-bit lane granularity the intrinsic expects.
    unsafe { _svhsub_s64_m(pg.sve_into(), op1, op2) }
}
4777#[doc = "Halving subtract"]
4778#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_m)"]
4779#[inline(always)]
4780#[target_feature(enable = "sve,sve2")]
4781#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4782#[cfg_attr(test, assert_instr(shsub))]
4783pub fn svhsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
4784 svhsub_s64_m(pg, op1, svdup_n_s64(op2))
4785}
#[doc = "Halving subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shsub))]
pub fn svhsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // "Don't care" predication: inactive result lanes may hold any value, so
    // the merging (`_m`) form is reused unchanged.
    svhsub_s64_m(pg, op1, op2)
}
4795#[doc = "Halving subtract"]
4796#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_x)"]
4797#[inline(always)]
4798#[target_feature(enable = "sve,sve2")]
4799#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4800#[cfg_attr(test, assert_instr(shsub))]
4801pub fn svhsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
4802 svhsub_s64_x(pg, op1, svdup_n_s64(op2))
4803}
4804#[doc = "Halving subtract"]
4805#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_z)"]
4806#[inline(always)]
4807#[target_feature(enable = "sve,sve2")]
4808#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4809#[cfg_attr(test, assert_instr(shsub))]
4810pub fn svhsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
4811 svhsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
4812}
4813#[doc = "Halving subtract"]
4814#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_z)"]
4815#[inline(always)]
4816#[target_feature(enable = "sve,sve2")]
4817#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4818#[cfg_attr(test, assert_instr(shsub))]
4819pub fn svhsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
4820 svhsub_s64_z(pg, op1, svdup_n_s64(op2))
4821}
#[doc = "Halving subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhsub))]
pub fn svhsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        // LLVM exposes one `uhsub` intrinsic per element width over plain
        // integer vectors, hence the signed vector types in this binding.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv16i8")]
        fn _svhsub_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: pure register computation; the predicate is passed unchanged
    // (byte granularity) and `as_signed`/`as_unsigned` are bit-preserving casts.
    unsafe { _svhsub_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
4835#[doc = "Halving subtract"]
4836#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_m)"]
4837#[inline(always)]
4838#[target_feature(enable = "sve,sve2")]
4839#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4840#[cfg_attr(test, assert_instr(uhsub))]
4841pub fn svhsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
4842 svhsub_u8_m(pg, op1, svdup_n_u8(op2))
4843}
#[doc = "Halving subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhsub))]
pub fn svhsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // "Don't care" predication: inactive result lanes may hold any value, so
    // the merging (`_m`) form is reused unchanged.
    svhsub_u8_m(pg, op1, op2)
}
4853#[doc = "Halving subtract"]
4854#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_x)"]
4855#[inline(always)]
4856#[target_feature(enable = "sve,sve2")]
4857#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4858#[cfg_attr(test, assert_instr(uhsub))]
4859pub fn svhsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
4860 svhsub_u8_x(pg, op1, svdup_n_u8(op2))
4861}
4862#[doc = "Halving subtract"]
4863#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_z)"]
4864#[inline(always)]
4865#[target_feature(enable = "sve,sve2")]
4866#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4867#[cfg_attr(test, assert_instr(uhsub))]
4868pub fn svhsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
4869 svhsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
4870}
4871#[doc = "Halving subtract"]
4872#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_z)"]
4873#[inline(always)]
4874#[target_feature(enable = "sve,sve2")]
4875#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4876#[cfg_attr(test, assert_instr(uhsub))]
4877pub fn svhsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
4878 svhsub_u8_z(pg, op1, svdup_n_u8(op2))
4879}
#[doc = "Halving subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhsub))]
pub fn svhsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        // LLVM exposes one `uhsub` intrinsic per element width over plain
        // integer vectors, hence the signed vector types in this binding.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv8i16")]
        fn _svhsub_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: pure register computation; `sve_into` re-casts the predicate to
    // 16-bit lane granularity and `as_signed`/`as_unsigned` are bit-preserving
    // type casts.
    unsafe { _svhsub_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
4893#[doc = "Halving subtract"]
4894#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_m)"]
4895#[inline(always)]
4896#[target_feature(enable = "sve,sve2")]
4897#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4898#[cfg_attr(test, assert_instr(uhsub))]
4899pub fn svhsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
4900 svhsub_u16_m(pg, op1, svdup_n_u16(op2))
4901}
#[doc = "Halving subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhsub))]
pub fn svhsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // "Don't care" predication: inactive result lanes may hold any value, so
    // the merging (`_m`) form is reused unchanged.
    svhsub_u16_m(pg, op1, op2)
}
4911#[doc = "Halving subtract"]
4912#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_x)"]
4913#[inline(always)]
4914#[target_feature(enable = "sve,sve2")]
4915#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4916#[cfg_attr(test, assert_instr(uhsub))]
4917pub fn svhsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
4918 svhsub_u16_x(pg, op1, svdup_n_u16(op2))
4919}
4920#[doc = "Halving subtract"]
4921#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_z)"]
4922#[inline(always)]
4923#[target_feature(enable = "sve,sve2")]
4924#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4925#[cfg_attr(test, assert_instr(uhsub))]
4926pub fn svhsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
4927 svhsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
4928}
4929#[doc = "Halving subtract"]
4930#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_z)"]
4931#[inline(always)]
4932#[target_feature(enable = "sve,sve2")]
4933#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4934#[cfg_attr(test, assert_instr(uhsub))]
4935pub fn svhsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
4936 svhsub_u16_z(pg, op1, svdup_n_u16(op2))
4937}
#[doc = "Halving subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhsub))]
pub fn svhsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        // LLVM exposes one `uhsub` intrinsic per element width over plain
        // integer vectors, hence the signed vector types in this binding.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv4i32")]
        fn _svhsub_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: pure register computation; `sve_into` re-casts the predicate to
    // 32-bit lane granularity and `as_signed`/`as_unsigned` are bit-preserving
    // type casts.
    unsafe { _svhsub_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
4951#[doc = "Halving subtract"]
4952#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_m)"]
4953#[inline(always)]
4954#[target_feature(enable = "sve,sve2")]
4955#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4956#[cfg_attr(test, assert_instr(uhsub))]
4957pub fn svhsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
4958 svhsub_u32_m(pg, op1, svdup_n_u32(op2))
4959}
#[doc = "Halving subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhsub))]
pub fn svhsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // "Don't care" predication: inactive result lanes may hold any value, so
    // the merging (`_m`) form is reused unchanged.
    svhsub_u32_m(pg, op1, op2)
}
4969#[doc = "Halving subtract"]
4970#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_x)"]
4971#[inline(always)]
4972#[target_feature(enable = "sve,sve2")]
4973#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4974#[cfg_attr(test, assert_instr(uhsub))]
4975pub fn svhsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
4976 svhsub_u32_x(pg, op1, svdup_n_u32(op2))
4977}
4978#[doc = "Halving subtract"]
4979#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_z)"]
4980#[inline(always)]
4981#[target_feature(enable = "sve,sve2")]
4982#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4983#[cfg_attr(test, assert_instr(uhsub))]
4984pub fn svhsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
4985 svhsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
4986}
4987#[doc = "Halving subtract"]
4988#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_z)"]
4989#[inline(always)]
4990#[target_feature(enable = "sve,sve2")]
4991#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4992#[cfg_attr(test, assert_instr(uhsub))]
4993pub fn svhsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
4994 svhsub_u32_z(pg, op1, svdup_n_u32(op2))
4995}
#[doc = "Halving subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhsub))]
pub fn svhsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Merging (_m) form: inactive lanes (per `pg`) keep their value from `op1`.
    unsafe extern "unadjusted" {
        // The unsigned operation is selected via the `uhsub` link name; the
        // intrinsic itself is declared over signed vector types.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv2i64")]
        fn _svhsub_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic. `sve_into` converts
    // the predicate to the 2-lane form required for nxv2i64, and
    // `as_signed`/`as_unsigned` are bit-preserving reinterpretations.
    unsafe { _svhsub_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
5009#[doc = "Halving subtract"]
5010#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_m)"]
5011#[inline(always)]
5012#[target_feature(enable = "sve,sve2")]
5013#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5014#[cfg_attr(test, assert_instr(uhsub))]
5015pub fn svhsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
5016 svhsub_u64_m(pg, op1, svdup_n_u64(op2))
5017}
#[doc = "Halving subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhsub))]
pub fn svhsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // "Don't care" (_x) form: implemented by delegating to the merging (_m)
    // form, so inactive lanes happen to take their value from `op1`.
    svhsub_u64_m(pg, op1, op2)
}
5027#[doc = "Halving subtract"]
5028#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_x)"]
5029#[inline(always)]
5030#[target_feature(enable = "sve,sve2")]
5031#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5032#[cfg_attr(test, assert_instr(uhsub))]
5033pub fn svhsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
5034 svhsub_u64_x(pg, op1, svdup_n_u64(op2))
5035}
5036#[doc = "Halving subtract"]
5037#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_z)"]
5038#[inline(always)]
5039#[target_feature(enable = "sve,sve2")]
5040#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5041#[cfg_attr(test, assert_instr(uhsub))]
5042pub fn svhsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
5043 svhsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
5044}
5045#[doc = "Halving subtract"]
5046#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_z)"]
5047#[inline(always)]
5048#[target_feature(enable = "sve,sve2")]
5049#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5050#[cfg_attr(test, assert_instr(uhsub))]
5051pub fn svhsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
5052 svhsub_u64_z(pg, op1, svdup_n_u64(op2))
5053}
#[doc = "Halving subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shsub))]
pub fn svhsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Merging (_m) form: inactive lanes (per `pg`) keep their value from `op1`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv16i8")]
        fn _svhsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic. For nxv16i8 the
    // byte-granular predicate already has the right lane count, so `pg` is
    // passed through without conversion.
    unsafe { _svhsubr_s8_m(pg, op1, op2) }
}
5067#[doc = "Halving subtract reversed"]
5068#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_m)"]
5069#[inline(always)]
5070#[target_feature(enable = "sve,sve2")]
5071#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5072#[cfg_attr(test, assert_instr(shsub))]
5073pub fn svhsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
5074 svhsubr_s8_m(pg, op1, svdup_n_s8(op2))
5075}
#[doc = "Halving subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shsub))]
pub fn svhsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // "Don't care" (_x) form: implemented by delegating to the merging (_m)
    // form, so inactive lanes happen to take their value from `op1`.
    svhsubr_s8_m(pg, op1, op2)
}
5085#[doc = "Halving subtract reversed"]
5086#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_x)"]
5087#[inline(always)]
5088#[target_feature(enable = "sve,sve2")]
5089#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5090#[cfg_attr(test, assert_instr(shsub))]
5091pub fn svhsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
5092 svhsubr_s8_x(pg, op1, svdup_n_s8(op2))
5093}
5094#[doc = "Halving subtract reversed"]
5095#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_z)"]
5096#[inline(always)]
5097#[target_feature(enable = "sve,sve2")]
5098#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5099#[cfg_attr(test, assert_instr(shsub))]
5100pub fn svhsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
5101 svhsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
5102}
5103#[doc = "Halving subtract reversed"]
5104#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_z)"]
5105#[inline(always)]
5106#[target_feature(enable = "sve,sve2")]
5107#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5108#[cfg_attr(test, assert_instr(shsub))]
5109pub fn svhsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
5110 svhsubr_s8_z(pg, op1, svdup_n_s8(op2))
5111}
#[doc = "Halving subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shsub))]
pub fn svhsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Merging (_m) form: inactive lanes (per `pg`) keep their value from `op1`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv8i16")]
        fn _svhsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic; `sve_into` converts
    // the predicate to the 8-lane form required for nxv8i16.
    unsafe { _svhsubr_s16_m(pg.sve_into(), op1, op2) }
}
5125#[doc = "Halving subtract reversed"]
5126#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_m)"]
5127#[inline(always)]
5128#[target_feature(enable = "sve,sve2")]
5129#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5130#[cfg_attr(test, assert_instr(shsub))]
5131pub fn svhsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
5132 svhsubr_s16_m(pg, op1, svdup_n_s16(op2))
5133}
#[doc = "Halving subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shsub))]
pub fn svhsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // "Don't care" (_x) form: implemented by delegating to the merging (_m)
    // form, so inactive lanes happen to take their value from `op1`.
    svhsubr_s16_m(pg, op1, op2)
}
5143#[doc = "Halving subtract reversed"]
5144#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_x)"]
5145#[inline(always)]
5146#[target_feature(enable = "sve,sve2")]
5147#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5148#[cfg_attr(test, assert_instr(shsub))]
5149pub fn svhsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
5150 svhsubr_s16_x(pg, op1, svdup_n_s16(op2))
5151}
5152#[doc = "Halving subtract reversed"]
5153#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_z)"]
5154#[inline(always)]
5155#[target_feature(enable = "sve,sve2")]
5156#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5157#[cfg_attr(test, assert_instr(shsub))]
5158pub fn svhsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
5159 svhsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
5160}
5161#[doc = "Halving subtract reversed"]
5162#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_z)"]
5163#[inline(always)]
5164#[target_feature(enable = "sve,sve2")]
5165#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5166#[cfg_attr(test, assert_instr(shsub))]
5167pub fn svhsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
5168 svhsubr_s16_z(pg, op1, svdup_n_s16(op2))
5169}
#[doc = "Halving subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shsub))]
pub fn svhsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Merging (_m) form: inactive lanes (per `pg`) keep their value from `op1`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv4i32")]
        fn _svhsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic; `sve_into` converts
    // the predicate to the 4-lane form required for nxv4i32.
    unsafe { _svhsubr_s32_m(pg.sve_into(), op1, op2) }
}
5183#[doc = "Halving subtract reversed"]
5184#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_m)"]
5185#[inline(always)]
5186#[target_feature(enable = "sve,sve2")]
5187#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5188#[cfg_attr(test, assert_instr(shsub))]
5189pub fn svhsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
5190 svhsubr_s32_m(pg, op1, svdup_n_s32(op2))
5191}
#[doc = "Halving subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shsub))]
pub fn svhsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // "Don't care" (_x) form: implemented by delegating to the merging (_m)
    // form, so inactive lanes happen to take their value from `op1`.
    svhsubr_s32_m(pg, op1, op2)
}
5201#[doc = "Halving subtract reversed"]
5202#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_x)"]
5203#[inline(always)]
5204#[target_feature(enable = "sve,sve2")]
5205#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5206#[cfg_attr(test, assert_instr(shsub))]
5207pub fn svhsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
5208 svhsubr_s32_x(pg, op1, svdup_n_s32(op2))
5209}
5210#[doc = "Halving subtract reversed"]
5211#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_z)"]
5212#[inline(always)]
5213#[target_feature(enable = "sve,sve2")]
5214#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5215#[cfg_attr(test, assert_instr(shsub))]
5216pub fn svhsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
5217 svhsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
5218}
5219#[doc = "Halving subtract reversed"]
5220#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_z)"]
5221#[inline(always)]
5222#[target_feature(enable = "sve,sve2")]
5223#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5224#[cfg_attr(test, assert_instr(shsub))]
5225pub fn svhsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
5226 svhsubr_s32_z(pg, op1, svdup_n_s32(op2))
5227}
#[doc = "Halving subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shsub))]
pub fn svhsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Merging (_m) form: inactive lanes (per `pg`) keep their value from `op1`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv2i64")]
        fn _svhsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic; `sve_into` converts
    // the predicate to the 2-lane form required for nxv2i64.
    unsafe { _svhsubr_s64_m(pg.sve_into(), op1, op2) }
}
5241#[doc = "Halving subtract reversed"]
5242#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_m)"]
5243#[inline(always)]
5244#[target_feature(enable = "sve,sve2")]
5245#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5246#[cfg_attr(test, assert_instr(shsub))]
5247pub fn svhsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
5248 svhsubr_s64_m(pg, op1, svdup_n_s64(op2))
5249}
#[doc = "Halving subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shsub))]
pub fn svhsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // "Don't care" (_x) form: implemented by delegating to the merging (_m)
    // form, so inactive lanes happen to take their value from `op1`.
    svhsubr_s64_m(pg, op1, op2)
}
5259#[doc = "Halving subtract reversed"]
5260#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_x)"]
5261#[inline(always)]
5262#[target_feature(enable = "sve,sve2")]
5263#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5264#[cfg_attr(test, assert_instr(shsub))]
5265pub fn svhsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
5266 svhsubr_s64_x(pg, op1, svdup_n_s64(op2))
5267}
5268#[doc = "Halving subtract reversed"]
5269#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_z)"]
5270#[inline(always)]
5271#[target_feature(enable = "sve,sve2")]
5272#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5273#[cfg_attr(test, assert_instr(shsub))]
5274pub fn svhsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
5275 svhsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
5276}
5277#[doc = "Halving subtract reversed"]
5278#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_z)"]
5279#[inline(always)]
5280#[target_feature(enable = "sve,sve2")]
5281#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5282#[cfg_attr(test, assert_instr(shsub))]
5283pub fn svhsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
5284 svhsubr_s64_z(pg, op1, svdup_n_s64(op2))
5285}
#[doc = "Halving subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhsub))]
pub fn svhsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Merging (_m) form: inactive lanes (per `pg`) keep their value from `op1`.
    unsafe extern "unadjusted" {
        // Unsigned operation selected via the `uhsubr` link name; the
        // intrinsic itself is declared over signed vector types.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv16i8")]
        fn _svhsubr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic. For nxv16i8 the
    // predicate needs no conversion; `as_signed`/`as_unsigned` are
    // bit-preserving reinterpretations.
    unsafe { _svhsubr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
5299#[doc = "Halving subtract reversed"]
5300#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_m)"]
5301#[inline(always)]
5302#[target_feature(enable = "sve,sve2")]
5303#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5304#[cfg_attr(test, assert_instr(uhsub))]
5305pub fn svhsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
5306 svhsubr_u8_m(pg, op1, svdup_n_u8(op2))
5307}
#[doc = "Halving subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhsub))]
pub fn svhsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // "Don't care" (_x) form: implemented by delegating to the merging (_m)
    // form, so inactive lanes happen to take their value from `op1`.
    svhsubr_u8_m(pg, op1, op2)
}
5317#[doc = "Halving subtract reversed"]
5318#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_x)"]
5319#[inline(always)]
5320#[target_feature(enable = "sve,sve2")]
5321#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5322#[cfg_attr(test, assert_instr(uhsub))]
5323pub fn svhsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
5324 svhsubr_u8_x(pg, op1, svdup_n_u8(op2))
5325}
5326#[doc = "Halving subtract reversed"]
5327#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_z)"]
5328#[inline(always)]
5329#[target_feature(enable = "sve,sve2")]
5330#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5331#[cfg_attr(test, assert_instr(uhsub))]
5332pub fn svhsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
5333 svhsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
5334}
5335#[doc = "Halving subtract reversed"]
5336#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_z)"]
5337#[inline(always)]
5338#[target_feature(enable = "sve,sve2")]
5339#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5340#[cfg_attr(test, assert_instr(uhsub))]
5341pub fn svhsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
5342 svhsubr_u8_z(pg, op1, svdup_n_u8(op2))
5343}
#[doc = "Halving subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhsub))]
pub fn svhsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Merging (_m) form: inactive lanes (per `pg`) keep their value from `op1`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv8i16")]
        fn _svhsubr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic. `sve_into` converts
    // the predicate to the 8-lane form required for nxv8i16, and
    // `as_signed`/`as_unsigned` are bit-preserving reinterpretations.
    unsafe { _svhsubr_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
5357#[doc = "Halving subtract reversed"]
5358#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_m)"]
5359#[inline(always)]
5360#[target_feature(enable = "sve,sve2")]
5361#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5362#[cfg_attr(test, assert_instr(uhsub))]
5363pub fn svhsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
5364 svhsubr_u16_m(pg, op1, svdup_n_u16(op2))
5365}
#[doc = "Halving subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhsub))]
pub fn svhsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // "Don't care" (_x) form: implemented by delegating to the merging (_m)
    // form, so inactive lanes happen to take their value from `op1`.
    svhsubr_u16_m(pg, op1, op2)
}
5375#[doc = "Halving subtract reversed"]
5376#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_x)"]
5377#[inline(always)]
5378#[target_feature(enable = "sve,sve2")]
5379#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5380#[cfg_attr(test, assert_instr(uhsub))]
5381pub fn svhsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
5382 svhsubr_u16_x(pg, op1, svdup_n_u16(op2))
5383}
5384#[doc = "Halving subtract reversed"]
5385#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_z)"]
5386#[inline(always)]
5387#[target_feature(enable = "sve,sve2")]
5388#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5389#[cfg_attr(test, assert_instr(uhsub))]
5390pub fn svhsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
5391 svhsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
5392}
5393#[doc = "Halving subtract reversed"]
5394#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_z)"]
5395#[inline(always)]
5396#[target_feature(enable = "sve,sve2")]
5397#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5398#[cfg_attr(test, assert_instr(uhsub))]
5399pub fn svhsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
5400 svhsubr_u16_z(pg, op1, svdup_n_u16(op2))
5401}
#[doc = "Halving subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhsub))]
pub fn svhsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Merging (_m) form: inactive lanes (per `pg`) keep their value from `op1`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv4i32")]
        fn _svhsubr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic. `sve_into` converts
    // the predicate to the 4-lane form required for nxv4i32, and
    // `as_signed`/`as_unsigned` are bit-preserving reinterpretations.
    unsafe { _svhsubr_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
5415#[doc = "Halving subtract reversed"]
5416#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_m)"]
5417#[inline(always)]
5418#[target_feature(enable = "sve,sve2")]
5419#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5420#[cfg_attr(test, assert_instr(uhsub))]
5421pub fn svhsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
5422 svhsubr_u32_m(pg, op1, svdup_n_u32(op2))
5423}
#[doc = "Halving subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhsub))]
pub fn svhsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // "Don't care" (_x) form: implemented by delegating to the merging (_m)
    // form, so inactive lanes happen to take their value from `op1`.
    svhsubr_u32_m(pg, op1, op2)
}
5433#[doc = "Halving subtract reversed"]
5434#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_x)"]
5435#[inline(always)]
5436#[target_feature(enable = "sve,sve2")]
5437#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5438#[cfg_attr(test, assert_instr(uhsub))]
5439pub fn svhsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
5440 svhsubr_u32_x(pg, op1, svdup_n_u32(op2))
5441}
5442#[doc = "Halving subtract reversed"]
5443#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_z)"]
5444#[inline(always)]
5445#[target_feature(enable = "sve,sve2")]
5446#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5447#[cfg_attr(test, assert_instr(uhsub))]
5448pub fn svhsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
5449 svhsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
5450}
5451#[doc = "Halving subtract reversed"]
5452#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_z)"]
5453#[inline(always)]
5454#[target_feature(enable = "sve,sve2")]
5455#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5456#[cfg_attr(test, assert_instr(uhsub))]
5457pub fn svhsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
5458 svhsubr_u32_z(pg, op1, svdup_n_u32(op2))
5459}
#[doc = "Halving subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhsub))]
pub fn svhsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Merging (_m) form: inactive lanes (per `pg`) keep their value from `op1`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv2i64")]
        fn _svhsubr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic. `sve_into` converts
    // the predicate to the 2-lane form required for nxv2i64, and
    // `as_signed`/`as_unsigned` are bit-preserving reinterpretations.
    unsafe { _svhsubr_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
5473#[doc = "Halving subtract reversed"]
5474#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_m)"]
5475#[inline(always)]
5476#[target_feature(enable = "sve,sve2")]
5477#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5478#[cfg_attr(test, assert_instr(uhsub))]
5479pub fn svhsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
5480 svhsubr_u64_m(pg, op1, svdup_n_u64(op2))
5481}
#[doc = "Halving subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uhsub))]
pub fn svhsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // "Don't care" (_x) form: implemented by delegating to the merging (_m)
    // form, so inactive lanes happen to take their value from `op1`.
    svhsubr_u64_m(pg, op1, op2)
}
5491#[doc = "Halving subtract reversed"]
5492#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_x)"]
5493#[inline(always)]
5494#[target_feature(enable = "sve,sve2")]
5495#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5496#[cfg_attr(test, assert_instr(uhsub))]
5497pub fn svhsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
5498 svhsubr_u64_x(pg, op1, svdup_n_u64(op2))
5499}
5500#[doc = "Halving subtract reversed"]
5501#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_z)"]
5502#[inline(always)]
5503#[target_feature(enable = "sve,sve2")]
5504#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5505#[cfg_attr(test, assert_instr(uhsub))]
5506pub fn svhsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
5507 svhsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
5508}
5509#[doc = "Halving subtract reversed"]
5510#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_z)"]
5511#[inline(always)]
5512#[target_feature(enable = "sve,sve2")]
5513#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5514#[cfg_attr(test, assert_instr(uhsub))]
5515pub fn svhsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
5516 svhsubr_u64_z(pg, op1, svdup_n_u64(op2))
5517}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_f64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1d))]
pub unsafe fn svldnt1_gather_s64index_f64(
    pg: svbool_t,
    base: *const f64,
    indices: svint64_t,
) -> svfloat64_t {
    // Gathers one f64 per active lane. Per the `[s64]index` addressing form
    // in the Arm doc link above, `indices` are element indices relative to
    // `base` (not byte offsets).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2f64"
        )]
        fn _svldnt1_gather_s64index_f64(
            pg: svbool2_t,
            base: *const f64,
            indices: svint64_t,
        ) -> svfloat64_t;
    }
    // `sve_into` converts the predicate to the 2-lane form for nxv2f64; the
    // caller upholds the pointer-validity requirements documented above.
    _svldnt1_gather_s64index_f64(pg.sve_into(), base, indices)
}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1d))]
pub unsafe fn svldnt1_gather_s64index_s64(
    pg: svbool_t,
    base: *const i64,
    indices: svint64_t,
) -> svint64_t {
    // Gathers one i64 per active lane. Per the `[s64]index` addressing form
    // in the Arm doc link above, `indices` are element indices relative to
    // `base` (not byte offsets).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i64"
        )]
        fn _svldnt1_gather_s64index_s64(
            pg: svbool2_t,
            base: *const i64,
            indices: svint64_t,
        ) -> svint64_t;
    }
    // `sve_into` converts the predicate to the 2-lane form for nxv2i64; the
    // caller upholds the pointer-validity requirements documented above.
    _svldnt1_gather_s64index_s64(pg.sve_into(), base, indices)
}
5574#[doc = "Unextended load, non-temporal"]
5575#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_u64])"]
5576#[doc = "## Safety"]
5577#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5578#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5579#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5580#[inline(always)]
5581#[target_feature(enable = "sve,sve2")]
5582#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5583#[cfg_attr(test, assert_instr(ldnt1d))]
5584pub unsafe fn svldnt1_gather_s64index_u64(
5585 pg: svbool_t,
5586 base: *const u64,
5587 indices: svint64_t,
5588) -> svuint64_t {
5589 svldnt1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned()
5590}
5591#[doc = "Unextended load, non-temporal"]
5592#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_f64])"]
5593#[doc = "## Safety"]
5594#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5595#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5596#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5597#[inline(always)]
5598#[target_feature(enable = "sve,sve2")]
5599#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5600#[cfg_attr(test, assert_instr(ldnt1d))]
5601pub unsafe fn svldnt1_gather_u64index_f64(
5602 pg: svbool_t,
5603 base: *const f64,
5604 indices: svuint64_t,
5605) -> svfloat64_t {
5606 svldnt1_gather_s64index_f64(pg, base, indices.as_signed())
5607}
5608#[doc = "Unextended load, non-temporal"]
5609#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_s64])"]
5610#[doc = "## Safety"]
5611#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5612#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5613#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5614#[inline(always)]
5615#[target_feature(enable = "sve,sve2")]
5616#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5617#[cfg_attr(test, assert_instr(ldnt1d))]
5618pub unsafe fn svldnt1_gather_u64index_s64(
5619 pg: svbool_t,
5620 base: *const i64,
5621 indices: svuint64_t,
5622) -> svint64_t {
5623 svldnt1_gather_s64index_s64(pg, base, indices.as_signed())
5624}
5625#[doc = "Unextended load, non-temporal"]
5626#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_u64])"]
5627#[doc = "## Safety"]
5628#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5629#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5630#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5631#[inline(always)]
5632#[target_feature(enable = "sve,sve2")]
5633#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5634#[cfg_attr(test, assert_instr(ldnt1d))]
5635pub unsafe fn svldnt1_gather_u64index_u64(
5636 pg: svbool_t,
5637 base: *const u64,
5638 indices: svuint64_t,
5639) -> svuint64_t {
5640 svldnt1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned()
5641}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_f64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1d))]
pub unsafe fn svldnt1_gather_s64offset_f64(
    pg: svbool_t,
    base: *const f64,
    offsets: svint64_t,
) -> svfloat64_t {
    // Raw binding to the LLVM SVE2 non-temporal gather intrinsic for f64
    // elements with per-lane offsets (`offset` naming = unscaled, cf. the
    // separate `index` variants). The `link_name` string must match LLVM's
    // intrinsic name exactly.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2f64"
        )]
        fn _svldnt1_gather_s64offset_f64(
            pg: svbool2_t,
            base: *const f64,
            offsets: svint64_t,
        ) -> svfloat64_t;
    }
    // Convert the generic predicate to the 2-lane form the intrinsic expects.
    _svldnt1_gather_s64offset_f64(pg.sve_into(), base, offsets)
}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1d))]
pub unsafe fn svldnt1_gather_s64offset_s64(
    pg: svbool_t,
    base: *const i64,
    offsets: svint64_t,
) -> svint64_t {
    // Raw binding to the LLVM SVE2 non-temporal gather intrinsic for i64
    // elements with per-lane offsets. The unsigned and u64-offset wrappers in
    // this file all funnel into this binding via reinterpreting casts.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i64"
        )]
        fn _svldnt1_gather_s64offset_s64(
            pg: svbool2_t,
            base: *const i64,
            offsets: svint64_t,
        ) -> svint64_t;
    }
    // Convert the generic predicate to the 2-lane form the intrinsic expects.
    _svldnt1_gather_s64offset_s64(pg.sve_into(), base, offsets)
}
5698#[doc = "Unextended load, non-temporal"]
5699#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_u64])"]
5700#[doc = "## Safety"]
5701#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5702#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5703#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5704#[inline(always)]
5705#[target_feature(enable = "sve,sve2")]
5706#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5707#[cfg_attr(test, assert_instr(ldnt1d))]
5708pub unsafe fn svldnt1_gather_s64offset_u64(
5709 pg: svbool_t,
5710 base: *const u64,
5711 offsets: svint64_t,
5712) -> svuint64_t {
5713 svldnt1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned()
5714}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_f32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1w))]
pub unsafe fn svldnt1_gather_u32offset_f32(
    pg: svbool_t,
    base: *const f32,
    offsets: svuint32_t,
) -> svfloat32_t {
    // Raw binding to the LLVM SVE2 non-temporal gather intrinsic for f32
    // elements; the `uxtw` in the intrinsic name indicates the 32-bit offsets
    // are zero-extended before the address calculation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32"
        )]
        fn _svldnt1_gather_u32offset_f32(
            pg: svbool4_t,
            base: *const f32,
            offsets: svint32_t,
        ) -> svfloat32_t;
    }
    // Convert the predicate to the 4-lane form; LLVM's signature takes the
    // offsets as a signed vector, so reinterpret (bit pattern unchanged).
    _svldnt1_gather_u32offset_f32(pg.sve_into(), base, offsets.as_signed())
}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_s32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1w))]
pub unsafe fn svldnt1_gather_u32offset_s32(
    pg: svbool_t,
    base: *const i32,
    offsets: svuint32_t,
) -> svint32_t {
    // Raw binding to the LLVM SVE2 non-temporal gather intrinsic for i32
    // elements; `uxtw` = the 32-bit offsets are zero-extended before the
    // address calculation. The u32-element wrapper reuses this binding.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32"
        )]
        fn _svldnt1_gather_u32offset_s32(
            pg: svbool4_t,
            base: *const i32,
            offsets: svint32_t,
        ) -> svint32_t;
    }
    // Convert the predicate to the 4-lane form; reinterpret the offsets to
    // match LLVM's signed-vector signature (bit pattern unchanged).
    _svldnt1_gather_u32offset_s32(pg.sve_into(), base, offsets.as_signed())
}
5771#[doc = "Unextended load, non-temporal"]
5772#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_u32])"]
5773#[doc = "## Safety"]
5774#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5775#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5776#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5777#[inline(always)]
5778#[target_feature(enable = "sve,sve2")]
5779#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5780#[cfg_attr(test, assert_instr(ldnt1w))]
5781pub unsafe fn svldnt1_gather_u32offset_u32(
5782 pg: svbool_t,
5783 base: *const u32,
5784 offsets: svuint32_t,
5785) -> svuint32_t {
5786 svldnt1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned()
5787}
5788#[doc = "Unextended load, non-temporal"]
5789#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_f64])"]
5790#[doc = "## Safety"]
5791#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5792#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5793#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5794#[inline(always)]
5795#[target_feature(enable = "sve,sve2")]
5796#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5797#[cfg_attr(test, assert_instr(ldnt1d))]
5798pub unsafe fn svldnt1_gather_u64offset_f64(
5799 pg: svbool_t,
5800 base: *const f64,
5801 offsets: svuint64_t,
5802) -> svfloat64_t {
5803 svldnt1_gather_s64offset_f64(pg, base, offsets.as_signed())
5804}
5805#[doc = "Unextended load, non-temporal"]
5806#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_s64])"]
5807#[doc = "## Safety"]
5808#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5809#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5810#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5811#[inline(always)]
5812#[target_feature(enable = "sve,sve2")]
5813#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5814#[cfg_attr(test, assert_instr(ldnt1d))]
5815pub unsafe fn svldnt1_gather_u64offset_s64(
5816 pg: svbool_t,
5817 base: *const i64,
5818 offsets: svuint64_t,
5819) -> svint64_t {
5820 svldnt1_gather_s64offset_s64(pg, base, offsets.as_signed())
5821}
5822#[doc = "Unextended load, non-temporal"]
5823#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_u64])"]
5824#[doc = "## Safety"]
5825#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5826#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5827#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5828#[inline(always)]
5829#[target_feature(enable = "sve,sve2")]
5830#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5831#[cfg_attr(test, assert_instr(ldnt1d))]
5832pub unsafe fn svldnt1_gather_u64offset_u64(
5833 pg: svbool_t,
5834 base: *const u64,
5835 offsets: svuint64_t,
5836) -> svuint64_t {
5837 svldnt1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned()
5838}
5839#[doc = "Unextended load, non-temporal"]
5840#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_f32)"]
5841#[doc = "## Safety"]
5842#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5843#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5844#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
5845#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5846#[inline(always)]
5847#[target_feature(enable = "sve,sve2")]
5848#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5849#[cfg_attr(test, assert_instr(ldnt1w))]
5850pub unsafe fn svldnt1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t {
5851 svldnt1_gather_u32base_offset_f32(pg, bases, 0)
5852}
5853#[doc = "Unextended load, non-temporal"]
5854#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_s32)"]
5855#[doc = "## Safety"]
5856#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5857#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5858#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
5859#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5860#[inline(always)]
5861#[target_feature(enable = "sve,sve2")]
5862#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5863#[cfg_attr(test, assert_instr(ldnt1w))]
5864pub unsafe fn svldnt1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
5865 svldnt1_gather_u32base_offset_s32(pg, bases, 0)
5866}
5867#[doc = "Unextended load, non-temporal"]
5868#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_u32)"]
5869#[doc = "## Safety"]
5870#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5871#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5872#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
5873#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5874#[inline(always)]
5875#[target_feature(enable = "sve,sve2")]
5876#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5877#[cfg_attr(test, assert_instr(ldnt1w))]
5878pub unsafe fn svldnt1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
5879 svldnt1_gather_u32base_offset_u32(pg, bases, 0)
5880}
5881#[doc = "Unextended load, non-temporal"]
5882#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_f64)"]
5883#[doc = "## Safety"]
5884#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5885#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5886#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
5887#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5888#[inline(always)]
5889#[target_feature(enable = "sve,sve2")]
5890#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5891#[cfg_attr(test, assert_instr(ldnt1d))]
5892pub unsafe fn svldnt1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t {
5893 svldnt1_gather_u64base_offset_f64(pg, bases, 0)
5894}
5895#[doc = "Unextended load, non-temporal"]
5896#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_s64)"]
5897#[doc = "## Safety"]
5898#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5899#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5900#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
5901#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5902#[inline(always)]
5903#[target_feature(enable = "sve,sve2")]
5904#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5905#[cfg_attr(test, assert_instr(ldnt1d))]
5906pub unsafe fn svldnt1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
5907 svldnt1_gather_u64base_offset_s64(pg, bases, 0)
5908}
5909#[doc = "Unextended load, non-temporal"]
5910#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_u64)"]
5911#[doc = "## Safety"]
5912#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5913#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5914#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
5915#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5916#[inline(always)]
5917#[target_feature(enable = "sve,sve2")]
5918#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5919#[cfg_attr(test, assert_instr(ldnt1d))]
5920pub unsafe fn svldnt1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
5921 svldnt1_gather_u64base_offset_u64(pg, bases, 0)
5922}
5923#[doc = "Unextended load, non-temporal"]
5924#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_f32)"]
5925#[doc = "## Safety"]
5926#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5927#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5928#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
5929#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5930#[inline(always)]
5931#[target_feature(enable = "sve,sve2")]
5932#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5933#[cfg_attr(test, assert_instr(ldnt1w))]
5934pub unsafe fn svldnt1_gather_u32base_index_f32(
5935 pg: svbool_t,
5936 bases: svuint32_t,
5937 index: i64,
5938) -> svfloat32_t {
5939 svldnt1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2))
5940}
5941#[doc = "Unextended load, non-temporal"]
5942#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_s32)"]
5943#[doc = "## Safety"]
5944#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5945#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5946#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
5947#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5948#[inline(always)]
5949#[target_feature(enable = "sve,sve2")]
5950#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5951#[cfg_attr(test, assert_instr(ldnt1w))]
5952pub unsafe fn svldnt1_gather_u32base_index_s32(
5953 pg: svbool_t,
5954 bases: svuint32_t,
5955 index: i64,
5956) -> svint32_t {
5957 svldnt1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2))
5958}
5959#[doc = "Unextended load, non-temporal"]
5960#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_u32)"]
5961#[doc = "## Safety"]
5962#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5963#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5964#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
5965#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5966#[inline(always)]
5967#[target_feature(enable = "sve,sve2")]
5968#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5969#[cfg_attr(test, assert_instr(ldnt1w))]
5970pub unsafe fn svldnt1_gather_u32base_index_u32(
5971 pg: svbool_t,
5972 bases: svuint32_t,
5973 index: i64,
5974) -> svuint32_t {
5975 svldnt1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2))
5976}
5977#[doc = "Unextended load, non-temporal"]
5978#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_f64)"]
5979#[doc = "## Safety"]
5980#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5981#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
5982#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
5983#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
5984#[inline(always)]
5985#[target_feature(enable = "sve,sve2")]
5986#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
5987#[cfg_attr(test, assert_instr(ldnt1d))]
5988pub unsafe fn svldnt1_gather_u64base_index_f64(
5989 pg: svbool_t,
5990 bases: svuint64_t,
5991 index: i64,
5992) -> svfloat64_t {
5993 svldnt1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3))
5994}
5995#[doc = "Unextended load, non-temporal"]
5996#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_s64)"]
5997#[doc = "## Safety"]
5998#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
5999#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6000#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6001#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6002#[inline(always)]
6003#[target_feature(enable = "sve,sve2")]
6004#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6005#[cfg_attr(test, assert_instr(ldnt1d))]
6006pub unsafe fn svldnt1_gather_u64base_index_s64(
6007 pg: svbool_t,
6008 bases: svuint64_t,
6009 index: i64,
6010) -> svint64_t {
6011 svldnt1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3))
6012}
6013#[doc = "Unextended load, non-temporal"]
6014#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_u64)"]
6015#[doc = "## Safety"]
6016#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6017#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6018#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6019#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6020#[inline(always)]
6021#[target_feature(enable = "sve,sve2")]
6022#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6023#[cfg_attr(test, assert_instr(ldnt1d))]
6024pub unsafe fn svldnt1_gather_u64base_index_u64(
6025 pg: svbool_t,
6026 bases: svuint64_t,
6027 index: i64,
6028) -> svuint64_t {
6029 svldnt1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3))
6030}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_f32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1w))]
pub unsafe fn svldnt1_gather_u32base_offset_f32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svfloat32_t {
    // Raw binding to the LLVM SVE2 non-temporal gather intrinsic taking a
    // vector of base addresses plus a single scalar byte offset. The plain
    // `_u32base_` and `_u32base_index_` wrappers in this file delegate here.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4f32.nxv4i32"
        )]
        fn _svldnt1_gather_u32base_offset_f32(
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        ) -> svfloat32_t;
    }
    // Convert the predicate to the 4-lane form; LLVM's signature takes the
    // base vector as signed, so reinterpret (bit pattern unchanged).
    _svldnt1_gather_u32base_offset_f32(pg.sve_into(), bases.as_signed(), offset)
}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_s32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1w))]
pub unsafe fn svldnt1_gather_u32base_offset_s32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svint32_t {
    // FFI declaration of the backing LLVM intrinsic (per-lane 32-bit base
    // addresses plus one scalar byte offset, non-temporal gather).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32"
        )]
        fn _svldnt1_gather_u32base_offset_s32(
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        ) -> svint32_t;
    }
    // Convert the generic predicate to the 4-lane form and reinterpret the
    // unsigned bases as signed, matching the LLVM declaration above.
    _svldnt1_gather_u32base_offset_s32(pg.sve_into(), bases.as_signed(), offset)
}
6089#[doc = "Unextended load, non-temporal"]
6090#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_u32)"]
6091#[doc = "## Safety"]
6092#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6093#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6094#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6095#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6096#[inline(always)]
6097#[target_feature(enable = "sve,sve2")]
6098#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6099#[cfg_attr(test, assert_instr(ldnt1w))]
6100pub unsafe fn svldnt1_gather_u32base_offset_u32(
6101 pg: svbool_t,
6102 bases: svuint32_t,
6103 offset: i64,
6104) -> svuint32_t {
6105 svldnt1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
6106}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_f64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1d))]
pub unsafe fn svldnt1_gather_u64base_offset_f64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svfloat64_t {
    // FFI declaration of the backing LLVM intrinsic (per-lane 64-bit base
    // addresses plus one scalar byte offset, non-temporal gather).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2f64.nxv2i64"
        )]
        fn _svldnt1_gather_u64base_offset_f64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> svfloat64_t;
    }
    // Convert the generic predicate to the 2-lane form and reinterpret the
    // unsigned bases as signed, matching the LLVM declaration above.
    _svldnt1_gather_u64base_offset_f64(pg.sve_into(), bases.as_signed(), offset)
}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1d))]
pub unsafe fn svldnt1_gather_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svint64_t {
    // FFI declaration of the backing LLVM intrinsic (per-lane 64-bit base
    // addresses plus one scalar byte offset, non-temporal gather).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64"
        )]
        fn _svldnt1_gather_u64base_offset_s64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> svint64_t;
    }
    // Convert the generic predicate to the 2-lane form and reinterpret the
    // unsigned bases as signed, matching the LLVM declaration above.
    _svldnt1_gather_u64base_offset_s64(pg.sve_into(), bases.as_signed(), offset)
}
6165#[doc = "Unextended load, non-temporal"]
6166#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_u64)"]
6167#[doc = "## Safety"]
6168#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6169#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6170#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6171#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6172#[inline(always)]
6173#[target_feature(enable = "sve,sve2")]
6174#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6175#[cfg_attr(test, assert_instr(ldnt1d))]
6176pub unsafe fn svldnt1_gather_u64base_offset_u64(
6177 pg: svbool_t,
6178 bases: svuint64_t,
6179 offset: i64,
6180) -> svuint64_t {
6181 svldnt1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
6182}
#[doc = "Load 8-bit data and sign-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[s64]offset_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1sb))]
pub unsafe fn svldnt1sb_gather_s64offset_s64(
    pg: svbool_t,
    base: *const i8,
    offsets: svint64_t,
) -> svint64_t {
    // FFI declaration of the backing LLVM intrinsic; it yields narrow 8-bit
    // lanes (nxv2i8) gathered from base + per-lane byte offsets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i8"
        )]
        fn _svldnt1sb_gather_s64offset_s64(
            pg: svbool2_t,
            base: *const i8,
            offsets: svint64_t,
        ) -> nxv2i8;
    }
    // simd_cast widens each loaded lane to 64 bits; for signed integer lanes
    // this lane-wise cast is a sign extension.
    crate::intrinsics::simd::simd_cast(_svldnt1sb_gather_s64offset_s64(
        pg.sve_into(),
        base,
        offsets,
    ))
}
#[doc = "Load 16-bit data and sign-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]offset_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1sh))]
pub unsafe fn svldnt1sh_gather_s64offset_s64(
    pg: svbool_t,
    base: *const i16,
    offsets: svint64_t,
) -> svint64_t {
    // FFI declaration of the backing LLVM intrinsic; it yields narrow 16-bit
    // lanes (nxv2i16) gathered from base + per-lane byte offsets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i16"
        )]
        fn _svldnt1sh_gather_s64offset_s64(
            pg: svbool2_t,
            base: *const i16,
            offsets: svint64_t,
        ) -> nxv2i16;
    }
    // simd_cast widens each loaded lane to 64 bits; for signed integer lanes
    // this lane-wise cast is a sign extension.
    crate::intrinsics::simd::simd_cast(_svldnt1sh_gather_s64offset_s64(
        pg.sve_into(),
        base,
        offsets,
    ))
}
#[doc = "Load 32-bit data and sign-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]offset_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1sw))]
pub unsafe fn svldnt1sw_gather_s64offset_s64(
    pg: svbool_t,
    base: *const i32,
    offsets: svint64_t,
) -> svint64_t {
    // FFI declaration of the backing LLVM intrinsic; it yields narrow 32-bit
    // lanes (nxv2i32) gathered from base + per-lane byte offsets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i32"
        )]
        fn _svldnt1sw_gather_s64offset_s64(
            pg: svbool2_t,
            base: *const i32,
            offsets: svint64_t,
        ) -> nxv2i32;
    }
    // simd_cast widens each loaded lane to 64 bits; for signed integer lanes
    // this lane-wise cast is a sign extension.
    crate::intrinsics::simd::simd_cast(_svldnt1sw_gather_s64offset_s64(
        pg.sve_into(),
        base,
        offsets,
    ))
}
6279#[doc = "Load 8-bit data and sign-extend, non-temporal"]
6280#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[s64]offset_u64)"]
6281#[doc = "## Safety"]
6282#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6283#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6284#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6285#[inline(always)]
6286#[target_feature(enable = "sve,sve2")]
6287#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6288#[cfg_attr(test, assert_instr(ldnt1sb))]
6289pub unsafe fn svldnt1sb_gather_s64offset_u64(
6290 pg: svbool_t,
6291 base: *const i8,
6292 offsets: svint64_t,
6293) -> svuint64_t {
6294 svldnt1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned()
6295}
6296#[doc = "Load 16-bit data and sign-extend, non-temporal"]
6297#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]offset_u64)"]
6298#[doc = "## Safety"]
6299#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6300#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6301#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6302#[inline(always)]
6303#[target_feature(enable = "sve,sve2")]
6304#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6305#[cfg_attr(test, assert_instr(ldnt1sh))]
6306pub unsafe fn svldnt1sh_gather_s64offset_u64(
6307 pg: svbool_t,
6308 base: *const i16,
6309 offsets: svint64_t,
6310) -> svuint64_t {
6311 svldnt1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned()
6312}
6313#[doc = "Load 32-bit data and sign-extend, non-temporal"]
6314#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]offset_u64)"]
6315#[doc = "## Safety"]
6316#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6317#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6318#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6319#[inline(always)]
6320#[target_feature(enable = "sve,sve2")]
6321#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6322#[cfg_attr(test, assert_instr(ldnt1sw))]
6323pub unsafe fn svldnt1sw_gather_s64offset_u64(
6324 pg: svbool_t,
6325 base: *const i32,
6326 offsets: svint64_t,
6327) -> svuint64_t {
6328 svldnt1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned()
6329}
#[doc = "Load 8-bit data and sign-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u32]offset_s32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1sb))]
pub unsafe fn svldnt1sb_gather_u32offset_s32(
    pg: svbool_t,
    base: *const i8,
    offsets: svuint32_t,
) -> svint32_t {
    // FFI declaration of the backing LLVM intrinsic; "uxtw" indicates the
    // 32-bit offsets are zero-extended during address formation, and it yields
    // narrow 8-bit lanes (nxv4i8).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8"
        )]
        fn _svldnt1sb_gather_u32offset_s32(
            pg: svbool4_t,
            base: *const i8,
            offsets: svint32_t,
        ) -> nxv4i8;
    }
    // simd_cast widens each loaded lane to 32 bits; for signed integer lanes
    // this lane-wise cast is a sign extension. Offsets are reinterpreted as
    // signed to match the LLVM declaration.
    crate::intrinsics::simd::simd_cast(_svldnt1sb_gather_u32offset_s32(
        pg.sve_into(),
        base,
        offsets.as_signed(),
    ))
}
#[doc = "Load 16-bit data and sign-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u32]offset_s32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1sh))]
pub unsafe fn svldnt1sh_gather_u32offset_s32(
    pg: svbool_t,
    base: *const i16,
    offsets: svuint32_t,
) -> svint32_t {
    // FFI declaration of the backing LLVM intrinsic; "uxtw" indicates the
    // 32-bit offsets are zero-extended during address formation, and it yields
    // narrow 16-bit lanes (nxv4i16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16"
        )]
        fn _svldnt1sh_gather_u32offset_s32(
            pg: svbool4_t,
            base: *const i16,
            offsets: svint32_t,
        ) -> nxv4i16;
    }
    // simd_cast widens each loaded lane to 32 bits; for signed integer lanes
    // this lane-wise cast is a sign extension. Offsets are reinterpreted as
    // signed to match the LLVM declaration.
    crate::intrinsics::simd::simd_cast(_svldnt1sh_gather_u32offset_s32(
        pg.sve_into(),
        base,
        offsets.as_signed(),
    ))
}
6394#[doc = "Load 8-bit data and sign-extend, non-temporal"]
6395#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u32]offset_u32)"]
6396#[doc = "## Safety"]
6397#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6398#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6399#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6400#[inline(always)]
6401#[target_feature(enable = "sve,sve2")]
6402#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6403#[cfg_attr(test, assert_instr(ldnt1sb))]
6404pub unsafe fn svldnt1sb_gather_u32offset_u32(
6405 pg: svbool_t,
6406 base: *const i8,
6407 offsets: svuint32_t,
6408) -> svuint32_t {
6409 svldnt1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned()
6410}
6411#[doc = "Load 16-bit data and sign-extend, non-temporal"]
6412#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u32]offset_u32)"]
6413#[doc = "## Safety"]
6414#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6415#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6416#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6417#[inline(always)]
6418#[target_feature(enable = "sve,sve2")]
6419#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6420#[cfg_attr(test, assert_instr(ldnt1sh))]
6421pub unsafe fn svldnt1sh_gather_u32offset_u32(
6422 pg: svbool_t,
6423 base: *const i16,
6424 offsets: svuint32_t,
6425) -> svuint32_t {
6426 svldnt1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned()
6427}
6428#[doc = "Load 8-bit data and sign-extend, non-temporal"]
6429#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u64]offset_s64)"]
6430#[doc = "## Safety"]
6431#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6432#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6433#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6434#[inline(always)]
6435#[target_feature(enable = "sve,sve2")]
6436#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6437#[cfg_attr(test, assert_instr(ldnt1sb))]
6438pub unsafe fn svldnt1sb_gather_u64offset_s64(
6439 pg: svbool_t,
6440 base: *const i8,
6441 offsets: svuint64_t,
6442) -> svint64_t {
6443 svldnt1sb_gather_s64offset_s64(pg, base, offsets.as_signed())
6444}
6445#[doc = "Load 16-bit data and sign-extend, non-temporal"]
6446#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]offset_s64)"]
6447#[doc = "## Safety"]
6448#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6449#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6450#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6451#[inline(always)]
6452#[target_feature(enable = "sve,sve2")]
6453#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6454#[cfg_attr(test, assert_instr(ldnt1sh))]
6455pub unsafe fn svldnt1sh_gather_u64offset_s64(
6456 pg: svbool_t,
6457 base: *const i16,
6458 offsets: svuint64_t,
6459) -> svint64_t {
6460 svldnt1sh_gather_s64offset_s64(pg, base, offsets.as_signed())
6461}
6462#[doc = "Load 32-bit data and sign-extend, non-temporal"]
6463#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]offset_s64)"]
6464#[doc = "## Safety"]
6465#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6466#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6467#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6468#[inline(always)]
6469#[target_feature(enable = "sve,sve2")]
6470#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6471#[cfg_attr(test, assert_instr(ldnt1sw))]
6472pub unsafe fn svldnt1sw_gather_u64offset_s64(
6473 pg: svbool_t,
6474 base: *const i32,
6475 offsets: svuint64_t,
6476) -> svint64_t {
6477 svldnt1sw_gather_s64offset_s64(pg, base, offsets.as_signed())
6478}
6479#[doc = "Load 8-bit data and sign-extend, non-temporal"]
6480#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u64]offset_u64)"]
6481#[doc = "## Safety"]
6482#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6483#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6484#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6485#[inline(always)]
6486#[target_feature(enable = "sve,sve2")]
6487#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6488#[cfg_attr(test, assert_instr(ldnt1sb))]
6489pub unsafe fn svldnt1sb_gather_u64offset_u64(
6490 pg: svbool_t,
6491 base: *const i8,
6492 offsets: svuint64_t,
6493) -> svuint64_t {
6494 svldnt1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
6495}
6496#[doc = "Load 16-bit data and sign-extend, non-temporal"]
6497#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]offset_u64)"]
6498#[doc = "## Safety"]
6499#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6500#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6501#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6502#[inline(always)]
6503#[target_feature(enable = "sve,sve2")]
6504#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6505#[cfg_attr(test, assert_instr(ldnt1sh))]
6506pub unsafe fn svldnt1sh_gather_u64offset_u64(
6507 pg: svbool_t,
6508 base: *const i16,
6509 offsets: svuint64_t,
6510) -> svuint64_t {
6511 svldnt1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
6512}
6513#[doc = "Load 32-bit data and sign-extend, non-temporal"]
6514#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]offset_u64)"]
6515#[doc = "## Safety"]
6516#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6517#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6518#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6519#[inline(always)]
6520#[target_feature(enable = "sve,sve2")]
6521#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6522#[cfg_attr(test, assert_instr(ldnt1sw))]
6523pub unsafe fn svldnt1sw_gather_u64offset_u64(
6524 pg: svbool_t,
6525 base: *const i32,
6526 offsets: svuint64_t,
6527) -> svuint64_t {
6528 svldnt1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
6529}
#[doc = "Load 8-bit data and sign-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_offset_s32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1sb))]
pub unsafe fn svldnt1sb_gather_u32base_offset_s32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svint32_t {
    // FFI declaration of the backing LLVM intrinsic (per-lane 32-bit base
    // addresses plus one scalar byte offset); it yields narrow 8-bit lanes
    // (nxv4i8).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32"
        )]
        fn _svldnt1sb_gather_u32base_offset_s32(
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        ) -> nxv4i8;
    }
    // simd_cast widens each loaded lane to 32 bits; for signed integer lanes
    // this lane-wise cast is a sign extension. Bases are reinterpreted as
    // signed to match the LLVM declaration.
    crate::intrinsics::simd::simd_cast(_svldnt1sb_gather_u32base_offset_s32(
        pg.sve_into(),
        bases.as_signed(),
        offset,
    ))
}
#[doc = "Load 16-bit data and sign-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_offset_s32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1sh))]
pub unsafe fn svldnt1sh_gather_u32base_offset_s32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svint32_t {
    // FFI declaration of the backing LLVM intrinsic (per-lane 32-bit base
    // addresses plus one scalar byte offset); it yields narrow 16-bit lanes
    // (nxv4i16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32"
        )]
        fn _svldnt1sh_gather_u32base_offset_s32(
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        ) -> nxv4i16;
    }
    // simd_cast widens each loaded lane to 32 bits; for signed integer lanes
    // this lane-wise cast is a sign extension. Bases are reinterpreted as
    // signed to match the LLVM declaration.
    crate::intrinsics::simd::simd_cast(_svldnt1sh_gather_u32base_offset_s32(
        pg.sve_into(),
        bases.as_signed(),
        offset,
    ))
}
6596#[doc = "Load 8-bit data and sign-extend, non-temporal"]
6597#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_offset_u32)"]
6598#[doc = "## Safety"]
6599#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6600#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6601#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6602#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6603#[inline(always)]
6604#[target_feature(enable = "sve,sve2")]
6605#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6606#[cfg_attr(test, assert_instr(ldnt1sb))]
6607pub unsafe fn svldnt1sb_gather_u32base_offset_u32(
6608 pg: svbool_t,
6609 bases: svuint32_t,
6610 offset: i64,
6611) -> svuint32_t {
6612 svldnt1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
6613}
6614#[doc = "Load 16-bit data and sign-extend, non-temporal"]
6615#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_offset_u32)"]
6616#[doc = "## Safety"]
6617#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6618#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6619#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6620#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6621#[inline(always)]
6622#[target_feature(enable = "sve,sve2")]
6623#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6624#[cfg_attr(test, assert_instr(ldnt1sh))]
6625pub unsafe fn svldnt1sh_gather_u32base_offset_u32(
6626 pg: svbool_t,
6627 bases: svuint32_t,
6628 offset: i64,
6629) -> svuint32_t {
6630 svldnt1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
6631}
#[doc = "Load 8-bit data and sign-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_offset_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1sb))]
pub unsafe fn svldnt1sb_gather_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svint64_t {
    // Raw LLVM intrinsic: gathers one 8-bit element per active lane of `pg`
    // (per-lane address presumably `bases[lane] + offset` bytes, per the
    // ACLE `_offset` naming — see the linked Arm docs). `nxv2i8` is the
    // narrow per-lane i8 result; `svbool2_t` is the 64-bit-element predicate
    // layout the intrinsic expects.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64"
        )]
        fn _svldnt1sb_gather_u64base_offset_s64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> nxv2i8;
    }
    // Widen the gathered i8 lanes to i64 — `simd_cast` between signed integer
    // vectors sign-extends. `sve_into`/`as_signed` only adapt the predicate
    // and base types to the FFI signature.
    crate::intrinsics::simd::simd_cast(_svldnt1sb_gather_u64base_offset_s64(
        pg.sve_into(),
        bases.as_signed(),
        offset,
    ))
}
#[doc = "Load 16-bit data and sign-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_offset_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1sh))]
pub unsafe fn svldnt1sh_gather_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svint64_t {
    // Raw LLVM intrinsic: gathers one 16-bit element per active lane of `pg`
    // (per-lane address presumably `bases[lane] + offset` bytes, per the ACLE
    // `_offset` naming — see the linked Arm docs). `nxv2i16` is the narrow
    // per-lane i16 result; `svbool2_t` is the 64-bit-element predicate layout.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64"
        )]
        fn _svldnt1sh_gather_u64base_offset_s64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> nxv2i16;
    }
    // Widen the gathered i16 lanes to i64 — `simd_cast` between signed
    // integer vectors sign-extends. `sve_into`/`as_signed` only adapt the
    // predicate and base types to the FFI signature.
    crate::intrinsics::simd::simd_cast(_svldnt1sh_gather_u64base_offset_s64(
        pg.sve_into(),
        bases.as_signed(),
        offset,
    ))
}
#[doc = "Load 32-bit data and sign-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_offset_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1sw))]
pub unsafe fn svldnt1sw_gather_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svint64_t {
    // Raw LLVM intrinsic: gathers one 32-bit element per active lane of `pg`
    // (per-lane address presumably `bases[lane] + offset` bytes, per the ACLE
    // `_offset` naming — see the linked Arm docs). `nxv2i32` is the narrow
    // per-lane i32 result; `svbool2_t` is the 64-bit-element predicate layout.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64"
        )]
        fn _svldnt1sw_gather_u64base_offset_s64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> nxv2i32;
    }
    // Widen the gathered i32 lanes to i64 — `simd_cast` between signed
    // integer vectors sign-extends. `sve_into`/`as_signed` only adapt the
    // predicate and base types to the FFI signature.
    crate::intrinsics::simd::simd_cast(_svldnt1sw_gather_u64base_offset_s64(
        pg.sve_into(),
        bases.as_signed(),
        offset,
    ))
}
6731#[doc = "Load 8-bit data and sign-extend, non-temporal"]
6732#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_offset_u64)"]
6733#[doc = "## Safety"]
6734#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6735#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6736#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6737#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6738#[inline(always)]
6739#[target_feature(enable = "sve,sve2")]
6740#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6741#[cfg_attr(test, assert_instr(ldnt1sb))]
6742pub unsafe fn svldnt1sb_gather_u64base_offset_u64(
6743 pg: svbool_t,
6744 bases: svuint64_t,
6745 offset: i64,
6746) -> svuint64_t {
6747 svldnt1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
6748}
6749#[doc = "Load 16-bit data and sign-extend, non-temporal"]
6750#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_offset_u64)"]
6751#[doc = "## Safety"]
6752#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6753#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6754#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6755#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6756#[inline(always)]
6757#[target_feature(enable = "sve,sve2")]
6758#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6759#[cfg_attr(test, assert_instr(ldnt1sh))]
6760pub unsafe fn svldnt1sh_gather_u64base_offset_u64(
6761 pg: svbool_t,
6762 bases: svuint64_t,
6763 offset: i64,
6764) -> svuint64_t {
6765 svldnt1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
6766}
6767#[doc = "Load 32-bit data and sign-extend, non-temporal"]
6768#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_offset_u64)"]
6769#[doc = "## Safety"]
6770#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6771#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6772#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6773#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6774#[inline(always)]
6775#[target_feature(enable = "sve,sve2")]
6776#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6777#[cfg_attr(test, assert_instr(ldnt1sw))]
6778pub unsafe fn svldnt1sw_gather_u64base_offset_u64(
6779 pg: svbool_t,
6780 bases: svuint64_t,
6781 offset: i64,
6782) -> svuint64_t {
6783 svldnt1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
6784}
6785#[doc = "Load 8-bit data and sign-extend, non-temporal"]
6786#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_s32)"]
6787#[doc = "## Safety"]
6788#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6789#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6790#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6791#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6792#[inline(always)]
6793#[target_feature(enable = "sve,sve2")]
6794#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6795#[cfg_attr(test, assert_instr(ldnt1sb))]
6796pub unsafe fn svldnt1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
6797 svldnt1sb_gather_u32base_offset_s32(pg, bases, 0)
6798}
6799#[doc = "Load 16-bit data and sign-extend, non-temporal"]
6800#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_s32)"]
6801#[doc = "## Safety"]
6802#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6803#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6804#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6805#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6806#[inline(always)]
6807#[target_feature(enable = "sve,sve2")]
6808#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6809#[cfg_attr(test, assert_instr(ldnt1sh))]
6810pub unsafe fn svldnt1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
6811 svldnt1sh_gather_u32base_offset_s32(pg, bases, 0)
6812}
6813#[doc = "Load 8-bit data and sign-extend, non-temporal"]
6814#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_u32)"]
6815#[doc = "## Safety"]
6816#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6817#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6818#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6819#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6820#[inline(always)]
6821#[target_feature(enable = "sve,sve2")]
6822#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6823#[cfg_attr(test, assert_instr(ldnt1sb))]
6824pub unsafe fn svldnt1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
6825 svldnt1sb_gather_u32base_offset_u32(pg, bases, 0)
6826}
6827#[doc = "Load 16-bit data and sign-extend, non-temporal"]
6828#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_u32)"]
6829#[doc = "## Safety"]
6830#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6831#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6832#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6833#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6834#[inline(always)]
6835#[target_feature(enable = "sve,sve2")]
6836#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6837#[cfg_attr(test, assert_instr(ldnt1sh))]
6838pub unsafe fn svldnt1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
6839 svldnt1sh_gather_u32base_offset_u32(pg, bases, 0)
6840}
6841#[doc = "Load 8-bit data and sign-extend, non-temporal"]
6842#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_s64)"]
6843#[doc = "## Safety"]
6844#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6845#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6846#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6847#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6848#[inline(always)]
6849#[target_feature(enable = "sve,sve2")]
6850#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6851#[cfg_attr(test, assert_instr(ldnt1sb))]
6852pub unsafe fn svldnt1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
6853 svldnt1sb_gather_u64base_offset_s64(pg, bases, 0)
6854}
6855#[doc = "Load 16-bit data and sign-extend, non-temporal"]
6856#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_s64)"]
6857#[doc = "## Safety"]
6858#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6859#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6860#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6861#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6862#[inline(always)]
6863#[target_feature(enable = "sve,sve2")]
6864#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6865#[cfg_attr(test, assert_instr(ldnt1sh))]
6866pub unsafe fn svldnt1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
6867 svldnt1sh_gather_u64base_offset_s64(pg, bases, 0)
6868}
6869#[doc = "Load 32-bit data and sign-extend, non-temporal"]
6870#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_s64)"]
6871#[doc = "## Safety"]
6872#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6873#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6874#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6875#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6876#[inline(always)]
6877#[target_feature(enable = "sve,sve2")]
6878#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6879#[cfg_attr(test, assert_instr(ldnt1sw))]
6880pub unsafe fn svldnt1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
6881 svldnt1sw_gather_u64base_offset_s64(pg, bases, 0)
6882}
6883#[doc = "Load 8-bit data and sign-extend, non-temporal"]
6884#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_u64)"]
6885#[doc = "## Safety"]
6886#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6887#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6888#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6889#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6890#[inline(always)]
6891#[target_feature(enable = "sve,sve2")]
6892#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6893#[cfg_attr(test, assert_instr(ldnt1sb))]
6894pub unsafe fn svldnt1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
6895 svldnt1sb_gather_u64base_offset_u64(pg, bases, 0)
6896}
6897#[doc = "Load 16-bit data and sign-extend, non-temporal"]
6898#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_u64)"]
6899#[doc = "## Safety"]
6900#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6901#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6902#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6903#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6904#[inline(always)]
6905#[target_feature(enable = "sve,sve2")]
6906#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6907#[cfg_attr(test, assert_instr(ldnt1sh))]
6908pub unsafe fn svldnt1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
6909 svldnt1sh_gather_u64base_offset_u64(pg, bases, 0)
6910}
6911#[doc = "Load 32-bit data and sign-extend, non-temporal"]
6912#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_u64)"]
6913#[doc = "## Safety"]
6914#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6915#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6916#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
6917#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6918#[inline(always)]
6919#[target_feature(enable = "sve,sve2")]
6920#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6921#[cfg_attr(test, assert_instr(ldnt1sw))]
6922pub unsafe fn svldnt1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
6923 svldnt1sw_gather_u64base_offset_u64(pg, bases, 0)
6924}
#[doc = "Load 16-bit data and sign-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]index_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1sh))]
pub unsafe fn svldnt1sh_gather_s64index_s64(
    pg: svbool_t,
    base: *const i16,
    indices: svint64_t,
) -> svint64_t {
    // Raw LLVM intrinsic: gathers one 16-bit element per active lane of `pg`.
    // The `.index` variant scales `indices` by the element size (i.e. lane
    // address is presumably `base + indices[lane] * 2` bytes, per the ACLE
    // `_index` naming — see the linked Arm docs). `nxv2i16` is the narrow
    // per-lane i16 result; `svbool2_t` is the 64-bit-element predicate layout.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i16"
        )]
        fn _svldnt1sh_gather_s64index_s64(
            pg: svbool2_t,
            base: *const i16,
            indices: svint64_t,
        ) -> nxv2i16;
    }
    // Widen the gathered i16 lanes to i64 — `simd_cast` between signed
    // integer vectors sign-extends.
    crate::intrinsics::simd::simd_cast(_svldnt1sh_gather_s64index_s64(pg.sve_into(), base, indices))
}
#[doc = "Load 32-bit data and sign-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]index_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1sw))]
pub unsafe fn svldnt1sw_gather_s64index_s64(
    pg: svbool_t,
    base: *const i32,
    indices: svint64_t,
) -> svint64_t {
    // Raw LLVM intrinsic: gathers one 32-bit element per active lane of `pg`.
    // The `.index` variant scales `indices` by the element size (i.e. lane
    // address is presumably `base + indices[lane] * 4` bytes, per the ACLE
    // `_index` naming — see the linked Arm docs). `nxv2i32` is the narrow
    // per-lane i32 result; `svbool2_t` is the 64-bit-element predicate layout.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i32"
        )]
        fn _svldnt1sw_gather_s64index_s64(
            pg: svbool2_t,
            base: *const i32,
            indices: svint64_t,
        ) -> nxv2i32;
    }
    // Widen the gathered i32 lanes to i64 — `simd_cast` between signed
    // integer vectors sign-extends.
    crate::intrinsics::simd::simd_cast(_svldnt1sw_gather_s64index_s64(pg.sve_into(), base, indices))
}
6981#[doc = "Load 16-bit data and sign-extend, non-temporal"]
6982#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]index_u64)"]
6983#[doc = "## Safety"]
6984#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
6985#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
6986#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
6987#[inline(always)]
6988#[target_feature(enable = "sve,sve2")]
6989#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6990#[cfg_attr(test, assert_instr(ldnt1sh))]
6991pub unsafe fn svldnt1sh_gather_s64index_u64(
6992 pg: svbool_t,
6993 base: *const i16,
6994 indices: svint64_t,
6995) -> svuint64_t {
6996 svldnt1sh_gather_s64index_s64(pg, base, indices).as_unsigned()
6997}
6998#[doc = "Load 32-bit data and sign-extend, non-temporal"]
6999#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]index_u64)"]
7000#[doc = "## Safety"]
7001#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7002#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7003#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7004#[inline(always)]
7005#[target_feature(enable = "sve,sve2")]
7006#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7007#[cfg_attr(test, assert_instr(ldnt1sw))]
7008pub unsafe fn svldnt1sw_gather_s64index_u64(
7009 pg: svbool_t,
7010 base: *const i32,
7011 indices: svint64_t,
7012) -> svuint64_t {
7013 svldnt1sw_gather_s64index_s64(pg, base, indices).as_unsigned()
7014}
7015#[doc = "Load 16-bit data and sign-extend, non-temporal"]
7016#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]index_s64)"]
7017#[doc = "## Safety"]
7018#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7019#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7020#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7021#[inline(always)]
7022#[target_feature(enable = "sve,sve2")]
7023#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7024#[cfg_attr(test, assert_instr(ldnt1sh))]
7025pub unsafe fn svldnt1sh_gather_u64index_s64(
7026 pg: svbool_t,
7027 base: *const i16,
7028 indices: svuint64_t,
7029) -> svint64_t {
7030 svldnt1sh_gather_s64index_s64(pg, base, indices.as_signed())
7031}
7032#[doc = "Load 32-bit data and sign-extend, non-temporal"]
7033#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]index_s64)"]
7034#[doc = "## Safety"]
7035#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7036#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7037#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7038#[inline(always)]
7039#[target_feature(enable = "sve,sve2")]
7040#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7041#[cfg_attr(test, assert_instr(ldnt1sw))]
7042pub unsafe fn svldnt1sw_gather_u64index_s64(
7043 pg: svbool_t,
7044 base: *const i32,
7045 indices: svuint64_t,
7046) -> svint64_t {
7047 svldnt1sw_gather_s64index_s64(pg, base, indices.as_signed())
7048}
7049#[doc = "Load 16-bit data and sign-extend, non-temporal"]
7050#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]index_u64)"]
7051#[doc = "## Safety"]
7052#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7053#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7054#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7055#[inline(always)]
7056#[target_feature(enable = "sve,sve2")]
7057#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7058#[cfg_attr(test, assert_instr(ldnt1sh))]
7059pub unsafe fn svldnt1sh_gather_u64index_u64(
7060 pg: svbool_t,
7061 base: *const i16,
7062 indices: svuint64_t,
7063) -> svuint64_t {
7064 svldnt1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned()
7065}
7066#[doc = "Load 32-bit data and sign-extend, non-temporal"]
7067#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]index_u64)"]
7068#[doc = "## Safety"]
7069#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7070#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7071#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7072#[inline(always)]
7073#[target_feature(enable = "sve,sve2")]
7074#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7075#[cfg_attr(test, assert_instr(ldnt1sw))]
7076pub unsafe fn svldnt1sw_gather_u64index_u64(
7077 pg: svbool_t,
7078 base: *const i32,
7079 indices: svuint64_t,
7080) -> svuint64_t {
7081 svldnt1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned()
7082}
7083#[doc = "Load 16-bit data and sign-extend, non-temporal"]
7084#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_index_s32)"]
7085#[doc = "## Safety"]
7086#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7087#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7088#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7089#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7090#[inline(always)]
7091#[target_feature(enable = "sve,sve2")]
7092#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7093#[cfg_attr(test, assert_instr(ldnt1sh))]
7094pub unsafe fn svldnt1sh_gather_u32base_index_s32(
7095 pg: svbool_t,
7096 bases: svuint32_t,
7097 index: i64,
7098) -> svint32_t {
7099 svldnt1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1))
7100}
7101#[doc = "Load 16-bit data and sign-extend, non-temporal"]
7102#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_index_u32)"]
7103#[doc = "## Safety"]
7104#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7105#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7106#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7107#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7108#[inline(always)]
7109#[target_feature(enable = "sve,sve2")]
7110#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7111#[cfg_attr(test, assert_instr(ldnt1sh))]
7112pub unsafe fn svldnt1sh_gather_u32base_index_u32(
7113 pg: svbool_t,
7114 bases: svuint32_t,
7115 index: i64,
7116) -> svuint32_t {
7117 svldnt1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1))
7118}
7119#[doc = "Load 16-bit data and sign-extend, non-temporal"]
7120#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_index_s64)"]
7121#[doc = "## Safety"]
7122#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7123#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7124#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7125#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7126#[inline(always)]
7127#[target_feature(enable = "sve,sve2")]
7128#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7129#[cfg_attr(test, assert_instr(ldnt1sh))]
7130pub unsafe fn svldnt1sh_gather_u64base_index_s64(
7131 pg: svbool_t,
7132 bases: svuint64_t,
7133 index: i64,
7134) -> svint64_t {
7135 svldnt1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1))
7136}
7137#[doc = "Load 32-bit data and sign-extend, non-temporal"]
7138#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_index_s64)"]
7139#[doc = "## Safety"]
7140#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7141#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7142#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7143#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7144#[inline(always)]
7145#[target_feature(enable = "sve,sve2")]
7146#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7147#[cfg_attr(test, assert_instr(ldnt1sw))]
7148pub unsafe fn svldnt1sw_gather_u64base_index_s64(
7149 pg: svbool_t,
7150 bases: svuint64_t,
7151 index: i64,
7152) -> svint64_t {
7153 svldnt1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2))
7154}
7155#[doc = "Load 16-bit data and sign-extend, non-temporal"]
7156#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_index_u64)"]
7157#[doc = "## Safety"]
7158#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7159#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7160#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7161#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7162#[inline(always)]
7163#[target_feature(enable = "sve,sve2")]
7164#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7165#[cfg_attr(test, assert_instr(ldnt1sh))]
7166pub unsafe fn svldnt1sh_gather_u64base_index_u64(
7167 pg: svbool_t,
7168 bases: svuint64_t,
7169 index: i64,
7170) -> svuint64_t {
7171 svldnt1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1))
7172}
7173#[doc = "Load 32-bit data and sign-extend, non-temporal"]
7174#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_index_u64)"]
7175#[doc = "## Safety"]
7176#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7177#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7178#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7179#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7180#[inline(always)]
7181#[target_feature(enable = "sve,sve2")]
7182#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7183#[cfg_attr(test, assert_instr(ldnt1sw))]
7184pub unsafe fn svldnt1sw_gather_u64base_index_u64(
7185 pg: svbool_t,
7186 bases: svuint64_t,
7187 index: i64,
7188) -> svuint64_t {
7189 svldnt1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2))
7190}
7191#[doc = "Load 8-bit data and zero-extend, non-temporal"]
7192#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[s64]offset_s64)"]
7193#[doc = "## Safety"]
7194#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7195#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7196#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7197#[inline(always)]
7198#[target_feature(enable = "sve,sve2")]
7199#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7200#[cfg_attr(test, assert_instr(ldnt1b))]
7201pub unsafe fn svldnt1ub_gather_s64offset_s64(
7202 pg: svbool_t,
7203 base: *const u8,
7204 offsets: svint64_t,
7205) -> svint64_t {
7206 svldnt1ub_gather_s64offset_u64(pg, base, offsets).as_signed()
7207}
7208#[doc = "Load 16-bit data and zero-extend, non-temporal"]
7209#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]offset_s64)"]
7210#[doc = "## Safety"]
7211#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7212#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7213#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7214#[inline(always)]
7215#[target_feature(enable = "sve,sve2")]
7216#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7217#[cfg_attr(test, assert_instr(ldnt1h))]
7218pub unsafe fn svldnt1uh_gather_s64offset_s64(
7219 pg: svbool_t,
7220 base: *const u16,
7221 offsets: svint64_t,
7222) -> svint64_t {
7223 svldnt1uh_gather_s64offset_u64(pg, base, offsets).as_signed()
7224}
7225#[doc = "Load 32-bit data and zero-extend, non-temporal"]
7226#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]offset_s64)"]
7227#[doc = "## Safety"]
7228#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7229#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7230#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7231#[inline(always)]
7232#[target_feature(enable = "sve,sve2")]
7233#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7234#[cfg_attr(test, assert_instr(ldnt1w))]
7235pub unsafe fn svldnt1uw_gather_s64offset_s64(
7236 pg: svbool_t,
7237 base: *const u32,
7238 offsets: svint64_t,
7239) -> svint64_t {
7240 svldnt1uw_gather_s64offset_u64(pg, base, offsets).as_signed()
7241}
#[doc = "Load 8-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[s64]offset_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1b))]
pub unsafe fn svldnt1ub_gather_s64offset_u64(
    pg: svbool_t,
    base: *const u8,
    offsets: svint64_t,
) -> svuint64_t {
    // Raw LLVM intrinsic: non-temporal gather of 8-bit elements into a
    // vector with 64-bit-wide lanes (`nxv2i8`). LLVM exposes only a
    // signed-element form, so signedness is reinterpreted around the call.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i8"
        )]
        fn _svldnt1ub_gather_s64offset_u64(
            pg: svbool2_t,
            base: *const i8,
            offsets: svint64_t,
        ) -> nxv2i8;
    }
    // Convert the predicate to the 2-lane form the intrinsic declares
    // (`svbool2_t`), reinterpret the loaded i8 lanes as u8, then widen each
    // lane to u64 via `simd_cast`. Because the source lanes are unsigned,
    // this widening is a zero extension — matching the contract above.
    crate::intrinsics::simd::simd_cast::<nxv2u8, _>(
        _svldnt1ub_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]offset_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1h))]
pub unsafe fn svldnt1uh_gather_s64offset_u64(
    pg: svbool_t,
    base: *const u16,
    offsets: svint64_t,
) -> svuint64_t {
    // Raw LLVM intrinsic: non-temporal gather of 16-bit elements into a
    // vector with 64-bit-wide lanes (`nxv2i16`). LLVM exposes only a
    // signed-element form, so signedness is reinterpreted around the call.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i16"
        )]
        fn _svldnt1uh_gather_s64offset_u64(
            pg: svbool2_t,
            base: *const i16,
            offsets: svint64_t,
        ) -> nxv2i16;
    }
    // Convert the predicate to the 2-lane form the intrinsic declares
    // (`svbool2_t`), reinterpret the loaded i16 lanes as u16, then widen
    // each lane to u64 via `simd_cast`. Because the source lanes are
    // unsigned, this widening is a zero extension — matching the contract
    // above.
    crate::intrinsics::simd::simd_cast::<nxv2u16, _>(
        _svldnt1uh_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(),
    )
}
#[doc = "Load 32-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]offset_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1w))]
pub unsafe fn svldnt1uw_gather_s64offset_u64(
    pg: svbool_t,
    base: *const u32,
    offsets: svint64_t,
) -> svuint64_t {
    // Raw LLVM intrinsic: non-temporal gather of 32-bit elements into a
    // vector with 64-bit-wide lanes (`nxv2i32`). LLVM exposes only a
    // signed-element form, so signedness is reinterpreted around the call.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i32"
        )]
        fn _svldnt1uw_gather_s64offset_u64(
            pg: svbool2_t,
            base: *const i32,
            offsets: svint64_t,
        ) -> nxv2i32;
    }
    // Convert the predicate to the 2-lane form the intrinsic declares
    // (`svbool2_t`), reinterpret the loaded i32 lanes as u32, then widen
    // each lane to u64 via `simd_cast`. Because the source lanes are
    // unsigned, this widening is a zero extension — matching the contract
    // above.
    crate::intrinsics::simd::simd_cast::<nxv2u32, _>(
        _svldnt1uw_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(),
    )
}
7332#[doc = "Load 8-bit data and zero-extend, non-temporal"]
7333#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u32]offset_s32)"]
7334#[doc = "## Safety"]
7335#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7336#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7337#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7338#[inline(always)]
7339#[target_feature(enable = "sve,sve2")]
7340#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7341#[cfg_attr(test, assert_instr(ldnt1b))]
7342pub unsafe fn svldnt1ub_gather_u32offset_s32(
7343 pg: svbool_t,
7344 base: *const u8,
7345 offsets: svuint32_t,
7346) -> svint32_t {
7347 svldnt1ub_gather_u32offset_u32(pg, base, offsets).as_signed()
7348}
7349#[doc = "Load 16-bit data and zero-extend, non-temporal"]
7350#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u32]offset_s32)"]
7351#[doc = "## Safety"]
7352#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7353#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7354#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7355#[inline(always)]
7356#[target_feature(enable = "sve,sve2")]
7357#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7358#[cfg_attr(test, assert_instr(ldnt1h))]
7359pub unsafe fn svldnt1uh_gather_u32offset_s32(
7360 pg: svbool_t,
7361 base: *const u16,
7362 offsets: svuint32_t,
7363) -> svint32_t {
7364 svldnt1uh_gather_u32offset_u32(pg, base, offsets).as_signed()
7365}
#[doc = "Load 8-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u32]offset_u32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1b))]
pub unsafe fn svldnt1ub_gather_u32offset_u32(
    pg: svbool_t,
    base: *const u8,
    offsets: svuint32_t,
) -> svuint32_t {
    // Raw LLVM intrinsic: non-temporal gather of 8-bit elements with
    // 32-bit unsigned offsets (the `.uxtw.` link name marks the
    // zero-extended-offset addressing form), into a vector with
    // 32-bit-wide lanes (`nxv4i8`). LLVM exposes only a signed-element
    // form, so signedness is reinterpreted around the call.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8"
        )]
        fn _svldnt1ub_gather_u32offset_u32(
            pg: svbool4_t,
            base: *const i8,
            offsets: svint32_t,
        ) -> nxv4i8;
    }
    // Convert the predicate to the 4-lane form the intrinsic declares
    // (`svbool4_t`), reinterpret pointer and offsets as signed for the
    // FFI signature, then reinterpret the loaded i8 lanes as u8 and widen
    // each lane to u32 via `simd_cast` (unsigned source, so this is a
    // zero extension — matching the contract above).
    crate::intrinsics::simd::simd_cast::<nxv4u8, _>(
        _svldnt1ub_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed())
            .as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u32]offset_u32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1h))]
pub unsafe fn svldnt1uh_gather_u32offset_u32(
    pg: svbool_t,
    base: *const u16,
    offsets: svuint32_t,
) -> svuint32_t {
    // Raw LLVM intrinsic: non-temporal gather of 16-bit elements with
    // 32-bit unsigned offsets (the `.uxtw.` link name marks the
    // zero-extended-offset addressing form), into a vector with
    // 32-bit-wide lanes (`nxv4i16`). LLVM exposes only a signed-element
    // form, so signedness is reinterpreted around the call.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16"
        )]
        fn _svldnt1uh_gather_u32offset_u32(
            pg: svbool4_t,
            base: *const i16,
            offsets: svint32_t,
        ) -> nxv4i16;
    }
    // Convert the predicate to the 4-lane form the intrinsic declares
    // (`svbool4_t`), reinterpret pointer and offsets as signed for the
    // FFI signature, then reinterpret the loaded i16 lanes as u16 and
    // widen each lane to u32 via `simd_cast` (unsigned source, so this is
    // a zero extension — matching the contract above).
    crate::intrinsics::simd::simd_cast::<nxv4u16, _>(
        _svldnt1uh_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed())
            .as_unsigned(),
    )
}
7428#[doc = "Load 8-bit data and zero-extend, non-temporal"]
7429#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u64]offset_s64)"]
7430#[doc = "## Safety"]
7431#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7432#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7433#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7434#[inline(always)]
7435#[target_feature(enable = "sve,sve2")]
7436#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7437#[cfg_attr(test, assert_instr(ldnt1b))]
7438pub unsafe fn svldnt1ub_gather_u64offset_s64(
7439 pg: svbool_t,
7440 base: *const u8,
7441 offsets: svuint64_t,
7442) -> svint64_t {
7443 svldnt1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
7444}
7445#[doc = "Load 16-bit data and zero-extend, non-temporal"]
7446#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]offset_s64)"]
7447#[doc = "## Safety"]
7448#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7449#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7450#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7451#[inline(always)]
7452#[target_feature(enable = "sve,sve2")]
7453#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7454#[cfg_attr(test, assert_instr(ldnt1h))]
7455pub unsafe fn svldnt1uh_gather_u64offset_s64(
7456 pg: svbool_t,
7457 base: *const u16,
7458 offsets: svuint64_t,
7459) -> svint64_t {
7460 svldnt1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
7461}
7462#[doc = "Load 32-bit data and zero-extend, non-temporal"]
7463#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]offset_s64)"]
7464#[doc = "## Safety"]
7465#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7466#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7467#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7468#[inline(always)]
7469#[target_feature(enable = "sve,sve2")]
7470#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7471#[cfg_attr(test, assert_instr(ldnt1w))]
7472pub unsafe fn svldnt1uw_gather_u64offset_s64(
7473 pg: svbool_t,
7474 base: *const u32,
7475 offsets: svuint64_t,
7476) -> svint64_t {
7477 svldnt1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
7478}
7479#[doc = "Load 8-bit data and zero-extend, non-temporal"]
7480#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u64]offset_u64)"]
7481#[doc = "## Safety"]
7482#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7483#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7484#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7485#[inline(always)]
7486#[target_feature(enable = "sve,sve2")]
7487#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7488#[cfg_attr(test, assert_instr(ldnt1b))]
7489pub unsafe fn svldnt1ub_gather_u64offset_u64(
7490 pg: svbool_t,
7491 base: *const u8,
7492 offsets: svuint64_t,
7493) -> svuint64_t {
7494 svldnt1ub_gather_s64offset_u64(pg, base, offsets.as_signed())
7495}
7496#[doc = "Load 16-bit data and zero-extend, non-temporal"]
7497#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]offset_u64)"]
7498#[doc = "## Safety"]
7499#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7500#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7501#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7502#[inline(always)]
7503#[target_feature(enable = "sve,sve2")]
7504#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7505#[cfg_attr(test, assert_instr(ldnt1h))]
7506pub unsafe fn svldnt1uh_gather_u64offset_u64(
7507 pg: svbool_t,
7508 base: *const u16,
7509 offsets: svuint64_t,
7510) -> svuint64_t {
7511 svldnt1uh_gather_s64offset_u64(pg, base, offsets.as_signed())
7512}
7513#[doc = "Load 32-bit data and zero-extend, non-temporal"]
7514#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]offset_u64)"]
7515#[doc = "## Safety"]
7516#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7517#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7518#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7519#[inline(always)]
7520#[target_feature(enable = "sve,sve2")]
7521#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7522#[cfg_attr(test, assert_instr(ldnt1w))]
7523pub unsafe fn svldnt1uw_gather_u64offset_u64(
7524 pg: svbool_t,
7525 base: *const u32,
7526 offsets: svuint64_t,
7527) -> svuint64_t {
7528 svldnt1uw_gather_s64offset_u64(pg, base, offsets.as_signed())
7529}
7530#[doc = "Load 8-bit data and zero-extend, non-temporal"]
7531#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_offset_s32)"]
7532#[doc = "## Safety"]
7533#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7534#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7535#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7536#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7537#[inline(always)]
7538#[target_feature(enable = "sve,sve2")]
7539#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7540#[cfg_attr(test, assert_instr(ldnt1b))]
7541pub unsafe fn svldnt1ub_gather_u32base_offset_s32(
7542 pg: svbool_t,
7543 bases: svuint32_t,
7544 offset: i64,
7545) -> svint32_t {
7546 svldnt1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed()
7547}
7548#[doc = "Load 16-bit data and zero-extend, non-temporal"]
7549#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_offset_s32)"]
7550#[doc = "## Safety"]
7551#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7552#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7553#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7554#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7555#[inline(always)]
7556#[target_feature(enable = "sve,sve2")]
7557#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7558#[cfg_attr(test, assert_instr(ldnt1h))]
7559pub unsafe fn svldnt1uh_gather_u32base_offset_s32(
7560 pg: svbool_t,
7561 bases: svuint32_t,
7562 offset: i64,
7563) -> svint32_t {
7564 svldnt1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed()
7565}
#[doc = "Load 8-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_offset_u32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1b))]
pub unsafe fn svldnt1ub_gather_u32base_offset_u32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svuint32_t {
    // Binding for the underlying LLVM gather intrinsic. It is declared in
    // terms of signed vector types (8-bit `nxv4i8` elements gathered via
    // `nxv4i32` base addresses), so the unsigned Rust-level types are
    // converted at the call boundary below.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32")]
        fn _svldnt1ub_gather_u32base_offset_u32(
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        ) -> nxv4i8;
    }
    // Reinterpret the loaded lanes as unsigned before widening: `simd_cast`
    // from an unsigned 8-bit source zero-extends each lane to 32 bits.
    crate::intrinsics::simd::simd_cast::<nxv4u8, _>(
        _svldnt1ub_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset)
            .as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_offset_u32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1h))]
pub unsafe fn svldnt1uh_gather_u32base_offset_u32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svuint32_t {
    // Binding for the underlying LLVM gather intrinsic. It is declared in
    // terms of signed vector types (16-bit `nxv8i16`-style elements, here
    // `nxv4i16`, gathered via `nxv4i32` base addresses), so the unsigned
    // Rust-level types are converted at the call boundary below.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32")]
        fn _svldnt1uh_gather_u32base_offset_u32(
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        ) -> nxv4i16;
    }
    // Reinterpret the loaded lanes as unsigned before widening: `simd_cast`
    // from an unsigned 16-bit source zero-extends each lane to 32 bits.
    crate::intrinsics::simd::simd_cast::<nxv4u16, _>(
        _svldnt1uh_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset)
            .as_unsigned(),
    )
}
7630#[doc = "Load 8-bit data and zero-extend, non-temporal"]
7631#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_offset_s64)"]
7632#[doc = "## Safety"]
7633#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7634#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7635#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7636#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7637#[inline(always)]
7638#[target_feature(enable = "sve,sve2")]
7639#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7640#[cfg_attr(test, assert_instr(ldnt1b))]
7641pub unsafe fn svldnt1ub_gather_u64base_offset_s64(
7642 pg: svbool_t,
7643 bases: svuint64_t,
7644 offset: i64,
7645) -> svint64_t {
7646 svldnt1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed()
7647}
7648#[doc = "Load 16-bit data and zero-extend, non-temporal"]
7649#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_offset_s64)"]
7650#[doc = "## Safety"]
7651#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7652#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7653#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7654#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7655#[inline(always)]
7656#[target_feature(enable = "sve,sve2")]
7657#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7658#[cfg_attr(test, assert_instr(ldnt1h))]
7659pub unsafe fn svldnt1uh_gather_u64base_offset_s64(
7660 pg: svbool_t,
7661 bases: svuint64_t,
7662 offset: i64,
7663) -> svint64_t {
7664 svldnt1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed()
7665}
7666#[doc = "Load 32-bit data and zero-extend, non-temporal"]
7667#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_offset_s64)"]
7668#[doc = "## Safety"]
7669#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7670#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7671#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7672#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7673#[inline(always)]
7674#[target_feature(enable = "sve,sve2")]
7675#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7676#[cfg_attr(test, assert_instr(ldnt1w))]
7677pub unsafe fn svldnt1uw_gather_u64base_offset_s64(
7678 pg: svbool_t,
7679 bases: svuint64_t,
7680 offset: i64,
7681) -> svint64_t {
7682 svldnt1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed()
7683}
#[doc = "Load 8-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_offset_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1b))]
pub unsafe fn svldnt1ub_gather_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svuint64_t {
    // Binding for the underlying LLVM gather intrinsic. It is declared in
    // terms of signed vector types (8-bit `nxv2i8` elements gathered via
    // `nxv2i64` base addresses), so the unsigned Rust-level types are
    // converted at the call boundary below.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64")]
        fn _svldnt1ub_gather_u64base_offset_u64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> nxv2i8;
    }
    // Reinterpret the loaded lanes as unsigned before widening: `simd_cast`
    // from an unsigned 8-bit source zero-extends each lane to 64 bits.
    crate::intrinsics::simd::simd_cast::<nxv2u8, _>(
        _svldnt1ub_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset)
            .as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_offset_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1h))]
pub unsafe fn svldnt1uh_gather_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svuint64_t {
    // Binding for the underlying LLVM gather intrinsic. It is declared in
    // terms of signed vector types (16-bit `nxv2i16` elements gathered via
    // `nxv2i64` base addresses), so the unsigned Rust-level types are
    // converted at the call boundary below.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64")]
        fn _svldnt1uh_gather_u64base_offset_u64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> nxv2i16;
    }
    // Reinterpret the loaded lanes as unsigned before widening: `simd_cast`
    // from an unsigned 16-bit source zero-extends each lane to 64 bits.
    crate::intrinsics::simd::simd_cast::<nxv2u16, _>(
        _svldnt1uh_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset)
            .as_unsigned(),
    )
}
#[doc = "Load 32-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_offset_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1w))]
pub unsafe fn svldnt1uw_gather_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svuint64_t {
    // Binding for the underlying LLVM gather intrinsic. It is declared in
    // terms of signed vector types (32-bit `nxv2i32` elements gathered via
    // `nxv2i64` base addresses), so the unsigned Rust-level types are
    // converted at the call boundary below.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64")]
        fn _svldnt1uw_gather_u64base_offset_u64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> nxv2i32;
    }
    // Reinterpret the loaded lanes as unsigned before widening: `simd_cast`
    // from an unsigned 32-bit source zero-extends each lane to 64 bits.
    crate::intrinsics::simd::simd_cast::<nxv2u32, _>(
        _svldnt1uw_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset)
            .as_unsigned(),
    )
}
7780#[doc = "Load 8-bit data and zero-extend, non-temporal"]
7781#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_s32)"]
7782#[doc = "## Safety"]
7783#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7784#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7785#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7786#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7787#[inline(always)]
7788#[target_feature(enable = "sve,sve2")]
7789#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7790#[cfg_attr(test, assert_instr(ldnt1b))]
7791pub unsafe fn svldnt1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
7792 svldnt1ub_gather_u32base_offset_s32(pg, bases, 0)
7793}
7794#[doc = "Load 16-bit data and zero-extend, non-temporal"]
7795#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_s32)"]
7796#[doc = "## Safety"]
7797#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7798#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7799#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7800#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7801#[inline(always)]
7802#[target_feature(enable = "sve,sve2")]
7803#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7804#[cfg_attr(test, assert_instr(ldnt1h))]
7805pub unsafe fn svldnt1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
7806 svldnt1uh_gather_u32base_offset_s32(pg, bases, 0)
7807}
7808#[doc = "Load 8-bit data and zero-extend, non-temporal"]
7809#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_u32)"]
7810#[doc = "## Safety"]
7811#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7812#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7813#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7814#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7815#[inline(always)]
7816#[target_feature(enable = "sve,sve2")]
7817#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7818#[cfg_attr(test, assert_instr(ldnt1b))]
7819pub unsafe fn svldnt1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
7820 svldnt1ub_gather_u32base_offset_u32(pg, bases, 0)
7821}
7822#[doc = "Load 16-bit data and zero-extend, non-temporal"]
7823#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_u32)"]
7824#[doc = "## Safety"]
7825#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7826#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7827#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7828#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7829#[inline(always)]
7830#[target_feature(enable = "sve,sve2")]
7831#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7832#[cfg_attr(test, assert_instr(ldnt1h))]
7833pub unsafe fn svldnt1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
7834 svldnt1uh_gather_u32base_offset_u32(pg, bases, 0)
7835}
7836#[doc = "Load 8-bit data and zero-extend, non-temporal"]
7837#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_s64)"]
7838#[doc = "## Safety"]
7839#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7840#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7841#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7842#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7843#[inline(always)]
7844#[target_feature(enable = "sve,sve2")]
7845#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7846#[cfg_attr(test, assert_instr(ldnt1b))]
7847pub unsafe fn svldnt1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
7848 svldnt1ub_gather_u64base_offset_s64(pg, bases, 0)
7849}
7850#[doc = "Load 16-bit data and zero-extend, non-temporal"]
7851#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_s64)"]
7852#[doc = "## Safety"]
7853#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7854#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7855#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7856#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7857#[inline(always)]
7858#[target_feature(enable = "sve,sve2")]
7859#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7860#[cfg_attr(test, assert_instr(ldnt1h))]
7861pub unsafe fn svldnt1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
7862 svldnt1uh_gather_u64base_offset_s64(pg, bases, 0)
7863}
7864#[doc = "Load 32-bit data and zero-extend, non-temporal"]
7865#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_s64)"]
7866#[doc = "## Safety"]
7867#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7868#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7869#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7870#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7871#[inline(always)]
7872#[target_feature(enable = "sve,sve2")]
7873#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7874#[cfg_attr(test, assert_instr(ldnt1w))]
7875pub unsafe fn svldnt1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
7876 svldnt1uw_gather_u64base_offset_s64(pg, bases, 0)
7877}
7878#[doc = "Load 8-bit data and zero-extend, non-temporal"]
7879#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_u64)"]
7880#[doc = "## Safety"]
7881#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7882#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7883#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7884#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7885#[inline(always)]
7886#[target_feature(enable = "sve,sve2")]
7887#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7888#[cfg_attr(test, assert_instr(ldnt1b))]
7889pub unsafe fn svldnt1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
7890 svldnt1ub_gather_u64base_offset_u64(pg, bases, 0)
7891}
7892#[doc = "Load 16-bit data and zero-extend, non-temporal"]
7893#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_u64)"]
7894#[doc = "## Safety"]
7895#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7896#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7897#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7898#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7899#[inline(always)]
7900#[target_feature(enable = "sve,sve2")]
7901#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7902#[cfg_attr(test, assert_instr(ldnt1h))]
7903pub unsafe fn svldnt1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
7904 svldnt1uh_gather_u64base_offset_u64(pg, bases, 0)
7905}
7906#[doc = "Load 32-bit data and zero-extend, non-temporal"]
7907#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_u64)"]
7908#[doc = "## Safety"]
7909#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7910#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7911#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
7912#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7913#[inline(always)]
7914#[target_feature(enable = "sve,sve2")]
7915#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7916#[cfg_attr(test, assert_instr(ldnt1w))]
7917pub unsafe fn svldnt1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
7918 svldnt1uw_gather_u64base_offset_u64(pg, bases, 0)
7919}
7920#[doc = "Load 16-bit data and zero-extend, non-temporal"]
7921#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]index_s64)"]
7922#[doc = "## Safety"]
7923#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7924#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7925#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7926#[inline(always)]
7927#[target_feature(enable = "sve,sve2")]
7928#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7929#[cfg_attr(test, assert_instr(ldnt1h))]
7930pub unsafe fn svldnt1uh_gather_s64index_s64(
7931 pg: svbool_t,
7932 base: *const u16,
7933 indices: svint64_t,
7934) -> svint64_t {
7935 svldnt1uh_gather_s64index_u64(pg, base, indices).as_signed()
7936}
7937#[doc = "Load 32-bit data and zero-extend, non-temporal"]
7938#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]index_s64)"]
7939#[doc = "## Safety"]
7940#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
7941#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
7942#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
7943#[inline(always)]
7944#[target_feature(enable = "sve,sve2")]
7945#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7946#[cfg_attr(test, assert_instr(ldnt1w))]
7947pub unsafe fn svldnt1uw_gather_s64index_s64(
7948 pg: svbool_t,
7949 base: *const u32,
7950 indices: svint64_t,
7951) -> svint64_t {
7952 svldnt1uw_gather_s64index_u64(pg, base, indices).as_signed()
7953}
#[doc = "Load 16-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]index_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1h))]
pub unsafe fn svldnt1uh_gather_s64index_u64(
    pg: svbool_t,
    base: *const u16,
    indices: svint64_t,
) -> svuint64_t {
    // Binding for the underlying LLVM indexed gather intrinsic. It is
    // declared with a signed element pointer (`*const i16`) and returns
    // signed `nxv2i16` lanes, so the unsigned Rust-level types are converted
    // at the call boundary below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i16"
        )]
        fn _svldnt1uh_gather_s64index_u64(
            pg: svbool2_t,
            base: *const i16,
            indices: svint64_t,
        ) -> nxv2i16;
    }
    // Reinterpret the loaded lanes as unsigned before widening: `simd_cast`
    // from an unsigned 16-bit source zero-extends each lane to 64 bits.
    crate::intrinsics::simd::simd_cast::<nxv2u16, _>(
        _svldnt1uh_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(),
    )
}
#[doc = "Load 32-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]index_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1w))]
pub unsafe fn svldnt1uw_gather_s64index_u64(
    pg: svbool_t,
    base: *const u32,
    indices: svint64_t,
) -> svuint64_t {
    // Binding for the underlying LLVM indexed gather intrinsic. It is
    // declared with a signed element pointer (`*const i32`) and returns
    // signed `nxv2i32` lanes, so the unsigned Rust-level types are converted
    // at the call boundary below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i32"
        )]
        fn _svldnt1uw_gather_s64index_u64(
            pg: svbool2_t,
            base: *const i32,
            indices: svint64_t,
        ) -> nxv2i32;
    }
    // Reinterpret the loaded lanes as unsigned before widening: `simd_cast`
    // from an unsigned 32-bit source zero-extends each lane to 64 bits.
    crate::intrinsics::simd::simd_cast::<nxv2u32, _>(
        _svldnt1uw_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]index_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1h))]
pub unsafe fn svldnt1uh_gather_u64index_s64(
    pg: svbool_t,
    base: *const u16,
    indices: svuint64_t,
) -> svint64_t {
    // Bit-identical to the signed-index/unsigned-result form; only the Rust
    // types of `indices` and the result differ, so re-type and delegate.
    svldnt1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed()
}
#[doc = "Load 32-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]index_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1w))]
pub unsafe fn svldnt1uw_gather_u64index_s64(
    pg: svbool_t,
    base: *const u32,
    indices: svuint64_t,
) -> svint64_t {
    // Bit-identical to the signed-index/unsigned-result form; only the Rust
    // types of `indices` and the result differ, so re-type and delegate.
    svldnt1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed()
}
#[doc = "Load 16-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]index_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1h))]
pub unsafe fn svldnt1uh_gather_u64index_u64(
    pg: svbool_t,
    base: *const u16,
    indices: svuint64_t,
) -> svuint64_t {
    // Same operation as the signed-index form; only `indices` needs re-typing.
    svldnt1uh_gather_s64index_u64(pg, base, indices.as_signed())
}
#[doc = "Load 32-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]index_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1w))]
pub unsafe fn svldnt1uw_gather_u64index_u64(
    pg: svbool_t,
    base: *const u32,
    indices: svuint64_t,
) -> svuint64_t {
    // Same operation as the signed-index form; only `indices` needs re-typing.
    svldnt1uw_gather_s64index_u64(pg, base, indices.as_signed())
}
#[doc = "Load 16-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_index_s32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1h))]
pub unsafe fn svldnt1uh_gather_u32base_index_s32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
) -> svint32_t {
    // An index counts 16-bit elements; shift left by 1 to turn it into the
    // byte offset the `_offset` form expects.
    svldnt1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 16-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_index_u32)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1h))]
pub unsafe fn svldnt1uh_gather_u32base_index_u32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
) -> svuint32_t {
    // An index counts 16-bit elements; shift left by 1 to turn it into the
    // byte offset the `_offset` form expects.
    svldnt1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 16-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_index_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1h))]
pub unsafe fn svldnt1uh_gather_u64base_index_s64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svint64_t {
    // An index counts 16-bit elements; shift left by 1 to turn it into the
    // byte offset the `_offset` form expects.
    svldnt1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 32-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_index_s64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1w))]
pub unsafe fn svldnt1uw_gather_u64base_index_s64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svint64_t {
    // An index counts 32-bit elements; shift left by 2 to turn it into the
    // byte offset the `_offset` form expects.
    svldnt1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2))
}
#[doc = "Load 16-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_index_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1h))]
pub unsafe fn svldnt1uh_gather_u64base_index_u64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svuint64_t {
    // An index counts 16-bit elements; shift left by 1 to turn it into the
    // byte offset the `_offset` form expects.
    svldnt1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 32-bit data and zero-extend, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_index_u64)"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1w))]
pub unsafe fn svldnt1uw_gather_u64base_index_u64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svuint64_t {
    // An index counts 32-bit elements; shift left by 2 to turn it into the
    // byte offset the `_offset` form expects.
    svldnt1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2))
}
#[doc = "Base 2 logarithm as integer"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(flogb))]
pub fn svlogb_f32_m(inactive: svint32_t, pg: svbool_t, op: svfloat32_t) -> svint32_t {
    // Raw LLVM intrinsic binding; "unadjusted" passes scalable vectors through
    // unchanged. Inactive lanes take their value from `inactive` (merging form).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.flogb.nxv4f32")]
        fn _svlogb_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]`
    // guarantees; `sve_into` re-types the predicate for 32-bit lanes.
    unsafe { _svlogb_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Base 2 logarithm as integer"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(flogb))]
pub fn svlogb_f32_x(pg: svbool_t, op: svfloat32_t) -> svint32_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so `op`'s own
    // bits, reinterpreted as an integer vector of the same width, serve as the
    // merge source for the merging (_m) implementation.
    unsafe { svlogb_f32_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Base 2 logarithm as integer"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(flogb))]
pub fn svlogb_f32_z(pg: svbool_t, op: svfloat32_t) -> svint32_t {
    // Zeroing (_z) form: inactive lanes become 0, so merge into an
    // all-zeros integer vector.
    svlogb_f32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Base 2 logarithm as integer"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(flogb))]
pub fn svlogb_f64_m(inactive: svint64_t, pg: svbool_t, op: svfloat64_t) -> svint64_t {
    // Raw LLVM intrinsic binding; "unadjusted" passes scalable vectors through
    // unchanged. Inactive lanes take their value from `inactive` (merging form).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.flogb.nxv2f64")]
        fn _svlogb_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]`
    // guarantees; `sve_into` re-types the predicate for 64-bit lanes.
    unsafe { _svlogb_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Base 2 logarithm as integer"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(flogb))]
pub fn svlogb_f64_x(pg: svbool_t, op: svfloat64_t) -> svint64_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so `op`'s own
    // bits, reinterpreted as an integer vector of the same width, serve as the
    // merge source for the merging (_m) implementation.
    unsafe { svlogb_f64_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Base 2 logarithm as integer"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(flogb))]
pub fn svlogb_f64_z(pg: svbool_t, op: svfloat64_t) -> svint64_t {
    // Zeroing (_z) form: inactive lanes become 0, so merge into an
    // all-zeros integer vector.
    svlogb_f64_m(svdup_n_s64(0), pg, op)
}
#[doc = "Detect any matching elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(match))]
pub fn svmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
    // Raw LLVM intrinsic binding; for 8-bit lanes the predicate already has
    // the right layout, so no re-typing is needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.match.nxv16i8")]
        fn _svmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]` guarantees.
    unsafe { _svmatch_s8(pg, op1, op2) }
}
#[doc = "Detect any matching elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(match))]
pub fn svmatch_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
    // Raw LLVM intrinsic binding; both the governing predicate and the result
    // use the 16-bit-lane predicate type.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.match.nxv8i16")]
        fn _svmatch_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]`
    // guarantees; `sve_into` re-types the predicate on the way in and out.
    unsafe { _svmatch_s16(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Detect any matching elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(match))]
pub fn svmatch_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
    // Element equality is bit-equality for integers, so the signed binding is
    // reused with re-typed arguments.
    unsafe { svmatch_s8(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Detect any matching elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(match))]
pub fn svmatch_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
    // Element equality is bit-equality for integers, so the signed binding is
    // reused with re-typed arguments.
    unsafe { svmatch_s16(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn svmaxnmp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Raw LLVM intrinsic binding; "unadjusted" passes scalable vectors
    // through unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.fmaxnmp.nxv4f32"
        )]
        fn _svmaxnmp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]`
    // guarantees; `sve_into` re-types the predicate for 32-bit lanes.
    unsafe { _svmaxnmp_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn svmaxnmp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // The "don't care" (_x) form shares its implementation with the merging
    // (_m) form.
    svmaxnmp_f32_m(pg, op1, op2)
}
#[doc = "Maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn svmaxnmp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Raw LLVM intrinsic binding; "unadjusted" passes scalable vectors
    // through unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.fmaxnmp.nxv2f64"
        )]
        fn _svmaxnmp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]`
    // guarantees; `sve_into` re-types the predicate for 64-bit lanes.
    unsafe { _svmaxnmp_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
pub fn svmaxnmp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // The "don't care" (_x) form shares its implementation with the merging
    // (_m) form.
    svmaxnmp_f64_m(pg, op1, op2)
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn svmaxp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Raw LLVM intrinsic binding; "unadjusted" passes scalable vectors
    // through unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxp.nxv4f32")]
        fn _svmaxp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]`
    // guarantees; `sve_into` re-types the predicate for 32-bit lanes.
    unsafe { _svmaxp_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn svmaxp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // The "don't care" (_x) form shares its implementation with the merging
    // (_m) form.
    svmaxp_f32_m(pg, op1, op2)
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn svmaxp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Raw LLVM intrinsic binding; "unadjusted" passes scalable vectors
    // through unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxp.nxv2f64")]
        fn _svmaxp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]`
    // guarantees; `sve_into` re-types the predicate for 64-bit lanes.
    unsafe { _svmaxp_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn svmaxp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // The "don't care" (_x) form shares its implementation with the merging
    // (_m) form.
    svmaxp_f64_m(pg, op1, op2)
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn svmaxp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Raw LLVM intrinsic binding; for 8-bit lanes the predicate already has
    // the right layout, so no re-typing is needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv16i8")]
        fn _svmaxp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]` guarantees.
    unsafe { _svmaxp_s8_m(pg, op1, op2) }
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn svmaxp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // The "don't care" (_x) form shares its implementation with the merging
    // (_m) form.
    svmaxp_s8_m(pg, op1, op2)
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn svmaxp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Raw LLVM intrinsic binding; "unadjusted" passes scalable vectors
    // through unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv8i16")]
        fn _svmaxp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]`
    // guarantees; `sve_into` re-types the predicate for 16-bit lanes.
    unsafe { _svmaxp_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn svmaxp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // The "don't care" (_x) form shares its implementation with the merging
    // (_m) form.
    svmaxp_s16_m(pg, op1, op2)
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn svmaxp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Raw LLVM intrinsic binding; "unadjusted" passes scalable vectors
    // through unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv4i32")]
        fn _svmaxp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]`
    // guarantees; `sve_into` re-types the predicate for 32-bit lanes.
    unsafe { _svmaxp_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn svmaxp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // The "don't care" (_x) form shares its implementation with the merging
    // (_m) form.
    svmaxp_s32_m(pg, op1, op2)
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn svmaxp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Raw LLVM intrinsic binding; "unadjusted" passes scalable vectors
    // through unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv2i64")]
        fn _svmaxp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]`
    // guarantees; `sve_into` re-types the predicate for 64-bit lanes.
    unsafe { _svmaxp_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn svmaxp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // The "don't care" (_x) form shares its implementation with the merging
    // (_m) form.
    svmaxp_s64_m(pg, op1, op2)
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn svmaxp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Raw binding to the *unsigned* maximum intrinsic (umaxp); LLVM declares
    // it over signed lane types, so arguments and result are re-typed.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv16i8")]
        fn _svmaxp_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]` guarantees.
    unsafe { _svmaxp_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn svmaxp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // The "don't care" (_x) form shares its implementation with the merging
    // (_m) form.
    svmaxp_u8_m(pg, op1, op2)
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn svmaxp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Raw binding to the *unsigned* maximum intrinsic (umaxp); LLVM declares
    // it over signed lane types, so arguments and result are re-typed.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv8i16")]
        fn _svmaxp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]`
    // guarantees; `sve_into` re-types the predicate for 16-bit lanes.
    unsafe { _svmaxp_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn svmaxp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // The "don't care" (_x) form shares its implementation with the merging
    // (_m) form.
    svmaxp_u16_m(pg, op1, op2)
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn svmaxp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Raw binding to the *unsigned* maximum intrinsic (umaxp); LLVM declares
    // it over signed lane types, so arguments and result are re-typed.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv4i32")]
        fn _svmaxp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]`
    // guarantees; `sve_into` re-types the predicate for 32-bit lanes.
    unsafe { _svmaxp_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn svmaxp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // The "don't care" (_x) form shares its implementation with the merging
    // (_m) form.
    svmaxp_u32_m(pg, op1, op2)
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn svmaxp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Raw binding to the *unsigned* maximum intrinsic (umaxp); LLVM declares
    // it over signed lane types, so arguments and result are re-typed.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv2i64")]
        fn _svmaxp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]`
    // guarantees; `sve_into` re-types the predicate for 64-bit lanes.
    unsafe { _svmaxp_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn svmaxp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // The "don't care" (_x) form shares its implementation with the merging
    // (_m) form.
    svmaxp_u64_m(pg, op1, op2)
}
#[doc = "Minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn svminnmp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Raw LLVM intrinsic binding; "unadjusted" passes scalable vectors
    // through unchanged.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.fminnmp.nxv4f32"
        )]
        fn _svminnmp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: the intrinsic only requires SVE2, which `#[target_feature]`
    // guarantees; `sve_into` re-types the predicate for 32-bit lanes.
    unsafe { _svminnmp_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn svminnmp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // The _x ("don't care" predication) form reuses the merging (_m)
    // implementation.
    svminnmp_f32_m(pg, op1, op2)
}
#[doc = "Minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn svminnmp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.fminnmp.nxv2f64"
        )]
        fn _svminnmp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above; `sve_into`
    // converts the generic predicate to the lane-width-specific predicate
    // type the intrinsic signature requires.
    unsafe { _svminnmp_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn svminnmp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // The _x ("don't care" predication) form reuses the merging (_m)
    // implementation.
    svminnmp_f64_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn svminp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminp.nxv4f32")]
        fn _svminp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above; `sve_into`
    // converts the generic predicate to the lane-width-specific predicate
    // type the intrinsic signature requires.
    unsafe { _svminp_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn svminp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // The _x ("don't care" predication) form reuses the merging (_m)
    // implementation.
    svminp_f32_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn svminp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminp.nxv2f64")]
        fn _svminp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above; `sve_into`
    // converts the generic predicate to the lane-width-specific predicate
    // type the intrinsic signature requires.
    unsafe { _svminp_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn svminp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // The _x ("don't care" predication) form reuses the merging (_m)
    // implementation.
    svminp_f64_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn svminp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv16i8")]
        fn _svminp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above; the generic
    // `svbool_t` predicate already matches the declared parameter type for
    // 8-bit lanes, so no predicate conversion is needed.
    unsafe { _svminp_s8_m(pg, op1, op2) }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn svminp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // The _x ("don't care" predication) form reuses the merging (_m)
    // implementation.
    svminp_s8_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn svminp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv8i16")]
        fn _svminp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above; `sve_into`
    // converts the generic predicate to the lane-width-specific predicate
    // type the intrinsic signature requires.
    unsafe { _svminp_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn svminp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // The _x ("don't care" predication) form reuses the merging (_m)
    // implementation.
    svminp_s16_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn svminp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv4i32")]
        fn _svminp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above; `sve_into`
    // converts the generic predicate to the lane-width-specific predicate
    // type the intrinsic signature requires.
    unsafe { _svminp_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn svminp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // The _x ("don't care" predication) form reuses the merging (_m)
    // implementation.
    svminp_s32_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn svminp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv2i64")]
        fn _svminp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above; `sve_into`
    // converts the generic predicate to the lane-width-specific predicate
    // type the intrinsic signature requires.
    unsafe { _svminp_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn svminp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // The _x ("don't care" predication) form reuses the merging (_m)
    // implementation.
    svminp_s64_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn svminp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv16i8")]
        fn _svminp_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above. The
    // intrinsic is declared over signed vectors, so the unsigned operands are
    // reinterpreted with `as_signed` and the result converted back with
    // `as_unsigned`; for 8-bit lanes the generic `svbool_t` predicate already
    // matches the declared parameter type.
    unsafe { _svminp_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn svminp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // The _x ("don't care" predication) form reuses the merging (_m)
    // implementation.
    svminp_u8_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn svminp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv8i16")]
        fn _svminp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above. The
    // intrinsic is declared over signed vectors, so the unsigned operands are
    // reinterpreted with `as_signed` and the result converted back with
    // `as_unsigned`; `sve_into` converts the predicate to the lane-width-
    // specific predicate type.
    unsafe { _svminp_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn svminp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // The _x ("don't care" predication) form reuses the merging (_m)
    // implementation.
    svminp_u16_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn svminp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv4i32")]
        fn _svminp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above. The
    // intrinsic is declared over signed vectors, so the unsigned operands are
    // reinterpreted with `as_signed` and the result converted back with
    // `as_unsigned`; `sve_into` converts the predicate to the lane-width-
    // specific predicate type.
    unsafe { _svminp_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn svminp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // The _x ("don't care" predication) form reuses the merging (_m)
    // implementation.
    svminp_u32_m(pg, op1, op2)
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn svminp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv2i64")]
        fn _svminp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above. The
    // intrinsic is declared over signed vectors, so the unsigned operands are
    // reinterpreted with `as_signed` and the result converted back with
    // `as_unsigned`; `sve_into` converts the predicate to the lane-width-
    // specific predicate type.
    unsafe { _svminp_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn svminp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // The _x ("don't care" predication) form reuses the merging (_m)
    // implementation.
    svminp_u64_m(pg, op1, op2)
}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
pub fn svmla_lane_s16<const IMM_INDEX: i32>(
    op1: svint16_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint16_t {
    // Lane index must address a 16-bit element; checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.mla.lane.nxv8i16"
        )]
        fn _svmla_lane_s16(
            op1: svint16_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint16_t;
    }
    // SAFETY: IMM_INDEX has been validated by `static_assert_range` above, so
    // a legal lane index is passed to the LLVM intrinsic declared above.
    unsafe { _svmla_lane_s16(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
pub fn svmla_lane_s32<const IMM_INDEX: i32>(
    op1: svint32_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint32_t {
    // Lane index must address a 32-bit element; checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.mla.lane.nxv4i32"
        )]
        fn _svmla_lane_s32(
            op1: svint32_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    // SAFETY: IMM_INDEX has been validated by `static_assert_range` above, so
    // a legal lane index is passed to the LLVM intrinsic declared above.
    unsafe { _svmla_lane_s32(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
pub fn svmla_lane_s64<const IMM_INDEX: i32>(
    op1: svint64_t,
    op2: svint64_t,
    op3: svint64_t,
) -> svint64_t {
    // Lane index must address a 64-bit element; checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.mla.lane.nxv2i64"
        )]
        fn _svmla_lane_s64(
            op1: svint64_t,
            op2: svint64_t,
            op3: svint64_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    // SAFETY: IMM_INDEX has been validated by `static_assert_range` above, so
    // a legal lane index is passed to the LLVM intrinsic declared above.
    unsafe { _svmla_lane_s64(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
pub fn svmla_lane_u16<const IMM_INDEX: i32>(
    op1: svuint16_t,
    op2: svuint16_t,
    op3: svuint16_t,
) -> svuint16_t {
    // Lane index checked here as well so the compile error names this function.
    static_assert_range!(IMM_INDEX, 0..=7);
    // SAFETY: delegates to the signed wrapper (both forms use the same `mla`
    // instruction, per `assert_instr` above); `as_signed`/`as_unsigned` only
    // reinterpret between the unsigned and signed vector types.
    unsafe {
        svmla_lane_s16::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
    }
}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
pub fn svmla_lane_u32<const IMM_INDEX: i32>(
    op1: svuint32_t,
    op2: svuint32_t,
    op3: svuint32_t,
) -> svuint32_t {
    // Lane index checked here as well so the compile error names this function.
    static_assert_range!(IMM_INDEX, 0..=3);
    // SAFETY: delegates to the signed wrapper (both forms use the same `mla`
    // instruction, per `assert_instr` above); `as_signed`/`as_unsigned` only
    // reinterpret between the unsigned and signed vector types.
    unsafe {
        svmla_lane_s32::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
    }
}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
pub fn svmla_lane_u64<const IMM_INDEX: i32>(
    op1: svuint64_t,
    op2: svuint64_t,
    op3: svuint64_t,
) -> svuint64_t {
    // Lane index checked here as well so the compile error names this function.
    static_assert_range!(IMM_INDEX, 0..=1);
    // SAFETY: delegates to the signed wrapper (both forms use the same `mla`
    // instruction, per `assert_instr` above); `as_signed`/`as_unsigned` only
    // reinterpret between the unsigned and signed vector types.
    unsafe {
        svmla_lane_s64::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
    }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalb, IMM_INDEX = 0))]
pub fn svmlalb_lane_s32<const IMM_INDEX: i32>(
    op1: svint32_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint32_t {
    // Lane index selects a 16-bit element of `op3`; checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.smlalb.lane.nxv4i32"
        )]
        fn _svmlalb_lane_s32(
            op1: svint32_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    // SAFETY: IMM_INDEX has been validated by `static_assert_range` above, so
    // a legal lane index is passed to the LLVM intrinsic declared above.
    unsafe { _svmlalb_lane_s32(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalb, IMM_INDEX = 0))]
pub fn svmlalb_lane_s64<const IMM_INDEX: i32>(
    op1: svint64_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint64_t {
    // Lane index selects a 32-bit element of `op3`; checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.smlalb.lane.nxv2i64"
        )]
        fn _svmlalb_lane_s64(
            op1: svint64_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    // SAFETY: IMM_INDEX has been validated by `static_assert_range` above, so
    // a legal lane index is passed to the LLVM intrinsic declared above.
    unsafe { _svmlalb_lane_s64(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalb, IMM_INDEX = 0))]
pub fn svmlalb_lane_u32<const IMM_INDEX: i32>(
    op1: svuint32_t,
    op2: svuint16_t,
    op3: svuint16_t,
) -> svuint32_t {
    // Lane index selects a 16-bit element of `op3`; checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.umlalb.lane.nxv4i32"
        )]
        fn _svmlalb_lane_u32(
            op1: svint32_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    // SAFETY: IMM_INDEX has been validated by `static_assert_range` above.
    // The intrinsic is declared over signed vectors, so the unsigned operands
    // are reinterpreted with `as_signed` and the result converted back with
    // `as_unsigned`.
    unsafe {
        _svmlalb_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
            .as_unsigned()
    }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalb, IMM_INDEX = 0))]
pub fn svmlalb_lane_u64<const IMM_INDEX: i32>(
    op1: svuint64_t,
    op2: svuint32_t,
    op3: svuint32_t,
) -> svuint64_t {
    // Lane index selects a 32-bit element of `op3`; checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.umlalb.lane.nxv2i64"
        )]
        fn _svmlalb_lane_u64(
            op1: svint64_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    // SAFETY: IMM_INDEX has been validated by `static_assert_range` above.
    // The intrinsic is declared over signed vectors, so the unsigned operands
    // are reinterpreted with `as_signed` and the result converted back with
    // `as_unsigned`.
    unsafe {
        _svmlalb_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
            .as_unsigned()
    }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalb))]
pub fn svmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv8i16")]
        fn _svmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above, whose
    // signature matches this function's types exactly.
    unsafe { _svmlalb_s16(op1, op2, op3) }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalb))]
pub fn svmlalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
    // Broadcast the scalar operand to a vector and reuse the vector form.
    svmlalb_s16(op1, op2, svdup_n_s8(op3))
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalb))]
pub fn svmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv4i32")]
        fn _svmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above, whose
    // signature matches this function's types exactly.
    unsafe { _svmlalb_s32(op1, op2, op3) }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalb))]
pub fn svmlalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
    // Broadcast the scalar operand to a vector and reuse the vector form.
    svmlalb_s32(op1, op2, svdup_n_s16(op3))
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalb))]
pub fn svmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv2i64")]
        fn _svmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above, whose
    // signature matches this function's types exactly.
    unsafe { _svmlalb_s64(op1, op2, op3) }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalb))]
pub fn svmlalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
    // Broadcast the scalar operand to a vector and reuse the vector form.
    svmlalb_s64(op1, op2, svdup_n_s32(op3))
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalb))]
pub fn svmlalb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv8i16")]
        fn _svmlalb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above. The
    // intrinsic is declared over signed vectors, so the unsigned operands are
    // reinterpreted with `as_signed` and the result converted back with
    // `as_unsigned`.
    unsafe { _svmlalb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalb))]
pub fn svmlalb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t {
    // Broadcast the scalar operand to a vector and reuse the vector form.
    svmlalb_u16(op1, op2, svdup_n_u8(op3))
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalb))]
pub fn svmlalb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv4i32")]
        fn _svmlalb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above. The
    // intrinsic is declared over signed vectors, so the unsigned operands are
    // reinterpreted with `as_signed` and the result converted back with
    // `as_unsigned`.
    unsafe { _svmlalb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalb))]
pub fn svmlalb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t {
    // Broadcast the scalar operand to a vector and reuse the vector form.
    svmlalb_u32(op1, op2, svdup_n_u16(op3))
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalb))]
pub fn svmlalb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv2i64")]
        fn _svmlalb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
    }
    // SAFETY: forwards to the LLVM SVE2 intrinsic declared above. The
    // intrinsic is declared over signed vectors, so the unsigned operands are
    // reinterpreted with `as_signed` and the result converted back with
    // `as_unsigned`.
    unsafe { _svmlalb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalb))]
pub fn svmlalb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t {
    // Broadcast the scalar operand to a vector and reuse the vector form.
    svmlalb_u64(op1, op2, svdup_n_u32(op3))
}
#[doc = "Multiply-add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalt, IMM_INDEX = 0))]
pub fn svmlalt_lane_s32<const IMM_INDEX: i32>(
    op1: svint32_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint32_t {
    // Lane index selects a 16-bit element of `op3`; checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.smlalt.lane.nxv4i32"
        )]
        fn _svmlalt_lane_s32(
            op1: svint32_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    // SAFETY: IMM_INDEX has been validated by `static_assert_range` above, so
    // a legal lane index is passed to the LLVM intrinsic declared above.
    unsafe { _svmlalt_lane_s32(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalt, IMM_INDEX = 0))]
pub fn svmlalt_lane_s64<const IMM_INDEX: i32>(
    op1: svint64_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint64_t {
    // Lane index selects a 32-bit element of `op3`; checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.smlalt.lane.nxv2i64"
        )]
        fn _svmlalt_lane_s64(
            op1: svint64_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    // SAFETY: IMM_INDEX has been validated by `static_assert_range` above, so
    // a legal lane index is passed to the LLVM intrinsic declared above.
    unsafe { _svmlalt_lane_s64(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalt, IMM_INDEX = 0))]
pub fn svmlalt_lane_u32<const IMM_INDEX: i32>(
    op1: svuint32_t,
    op2: svuint16_t,
    op3: svuint16_t,
) -> svuint32_t {
    // Lane index selects a 16-bit element of `op3`; checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.umlalt.lane.nxv4i32"
        )]
        fn _svmlalt_lane_u32(
            op1: svint32_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    // SAFETY: IMM_INDEX has been validated by `static_assert_range` above.
    // The intrinsic is declared over signed vectors, so the unsigned operands
    // are reinterpreted with `as_signed` and the result converted back with
    // `as_unsigned`.
    unsafe {
        _svmlalt_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
            .as_unsigned()
    }
}
#[doc = "Multiply-add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalt, IMM_INDEX = 0))]
pub fn svmlalt_lane_u64<const IMM_INDEX: i32>(
    op1: svuint64_t,
    op2: svuint32_t,
    op3: svuint32_t,
) -> svuint64_t {
    // Lane index selects a 32-bit element of `op3`; checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.umlalt.lane.nxv2i64"
        )]
        fn _svmlalt_lane_u64(
            op1: svint64_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    // SAFETY: IMM_INDEX has been validated by `static_assert_range` above.
    // The intrinsic is declared over signed vectors, so the unsigned operands
    // are reinterpreted with `as_signed` and the result converted back with
    // `as_unsigned`.
    unsafe {
        _svmlalt_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
            .as_unsigned()
    }
}
#[doc = "Multiply-add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalt))]
pub fn svmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
    // Direct binding to the LLVM intrinsic named in `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv8i16")]
        fn _svmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlalt_s16(op1, op2, op3) }
}
9327#[doc = "Multiply-add long (top)"]
9328#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s16])"]
9329#[inline(always)]
9330#[target_feature(enable = "sve,sve2")]
9331#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9332#[cfg_attr(test, assert_instr(smlalt))]
9333pub fn svmlalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
9334 svmlalt_s16(op1, op2, svdup_n_s8(op3))
9335}
#[doc = "Multiply-add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalt))]
pub fn svmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
    // Direct binding to the LLVM intrinsic named in `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv4i32")]
        fn _svmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlalt_s32(op1, op2, op3) }
}
9349#[doc = "Multiply-add long (top)"]
9350#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s32])"]
9351#[inline(always)]
9352#[target_feature(enable = "sve,sve2")]
9353#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9354#[cfg_attr(test, assert_instr(smlalt))]
9355pub fn svmlalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
9356 svmlalt_s32(op1, op2, svdup_n_s16(op3))
9357}
#[doc = "Multiply-add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlalt))]
pub fn svmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
    // Direct binding to the LLVM intrinsic named in `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv2i64")]
        fn _svmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlalt_s64(op1, op2, op3) }
}
9371#[doc = "Multiply-add long (top)"]
9372#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s64])"]
9373#[inline(always)]
9374#[target_feature(enable = "sve,sve2")]
9375#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9376#[cfg_attr(test, assert_instr(smlalt))]
9377pub fn svmlalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
9378 svmlalt_s64(op1, op2, svdup_n_s32(op3))
9379}
#[doc = "Multiply-add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalt))]
pub fn svmlalt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t {
    // The LLVM intrinsic is declared over signed vector types; the unsigned
    // operands are reinterpreted on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv8i16")]
        fn _svmlalt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlalt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
9393#[doc = "Multiply-add long (top)"]
9394#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u16])"]
9395#[inline(always)]
9396#[target_feature(enable = "sve,sve2")]
9397#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9398#[cfg_attr(test, assert_instr(umlalt))]
9399pub fn svmlalt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t {
9400 svmlalt_u16(op1, op2, svdup_n_u8(op3))
9401}
#[doc = "Multiply-add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalt))]
pub fn svmlalt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t {
    // The LLVM intrinsic is declared over signed vector types; the unsigned
    // operands are reinterpreted on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv4i32")]
        fn _svmlalt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlalt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
9415#[doc = "Multiply-add long (top)"]
9416#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u32])"]
9417#[inline(always)]
9418#[target_feature(enable = "sve,sve2")]
9419#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9420#[cfg_attr(test, assert_instr(umlalt))]
9421pub fn svmlalt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t {
9422 svmlalt_u32(op1, op2, svdup_n_u16(op3))
9423}
#[doc = "Multiply-add long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlalt))]
pub fn svmlalt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t {
    // The LLVM intrinsic is declared over signed vector types; the unsigned
    // operands are reinterpreted on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv2i64")]
        fn _svmlalt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlalt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
9437#[doc = "Multiply-add long (top)"]
9438#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u64])"]
9439#[inline(always)]
9440#[target_feature(enable = "sve,sve2")]
9441#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9442#[cfg_attr(test, assert_instr(umlalt))]
9443pub fn svmlalt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t {
9444 svmlalt_u64(op1, op2, svdup_n_u32(op3))
9445}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))]
pub fn svmls_lane_s16<const IMM_INDEX: i32>(
    op1: svint16_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint16_t {
    // IMM_INDEX selects the lane of `op3` used as the multiplier; range
    // (0..=7) enforced at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    // Direct binding to the LLVM intrinsic named in `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.mls.lane.nxv8i16"
        )]
        fn _svmls_lane_s16(
            op1: svint16_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint16_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmls_lane_s16(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))]
pub fn svmls_lane_s32<const IMM_INDEX: i32>(
    op1: svint32_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint32_t {
    // IMM_INDEX selects the lane of `op3` used as the multiplier; range
    // (0..=3) enforced at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    // Direct binding to the LLVM intrinsic named in `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.mls.lane.nxv4i32"
        )]
        fn _svmls_lane_s32(
            op1: svint32_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmls_lane_s32(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))]
pub fn svmls_lane_s64<const IMM_INDEX: i32>(
    op1: svint64_t,
    op2: svint64_t,
    op3: svint64_t,
) -> svint64_t {
    // IMM_INDEX selects the lane of `op3` used as the multiplier; range
    // (0..=1) enforced at compile time.
    static_assert_range!(IMM_INDEX, 0..=1);
    // Direct binding to the LLVM intrinsic named in `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.mls.lane.nxv2i64"
        )]
        fn _svmls_lane_s64(
            op1: svint64_t,
            op2: svint64_t,
            op3: svint64_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmls_lane_s64(op1, op2, op3, IMM_INDEX) }
}
9524#[doc = "Multiply-subtract, minuend first"]
9525#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u16])"]
9526#[inline(always)]
9527#[target_feature(enable = "sve,sve2")]
9528#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9529#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))]
9530pub fn svmls_lane_u16<const IMM_INDEX: i32>(
9531 op1: svuint16_t,
9532 op2: svuint16_t,
9533 op3: svuint16_t,
9534) -> svuint16_t {
9535 static_assert_range!(IMM_INDEX, 0..=7);
9536 unsafe {
9537 svmls_lane_s16::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
9538 }
9539}
9540#[doc = "Multiply-subtract, minuend first"]
9541#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u32])"]
9542#[inline(always)]
9543#[target_feature(enable = "sve,sve2")]
9544#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9545#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))]
9546pub fn svmls_lane_u32<const IMM_INDEX: i32>(
9547 op1: svuint32_t,
9548 op2: svuint32_t,
9549 op3: svuint32_t,
9550) -> svuint32_t {
9551 static_assert_range!(IMM_INDEX, 0..=3);
9552 unsafe {
9553 svmls_lane_s32::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
9554 }
9555}
9556#[doc = "Multiply-subtract, minuend first"]
9557#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u64])"]
9558#[inline(always)]
9559#[target_feature(enable = "sve,sve2")]
9560#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9561#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))]
9562pub fn svmls_lane_u64<const IMM_INDEX: i32>(
9563 op1: svuint64_t,
9564 op2: svuint64_t,
9565 op3: svuint64_t,
9566) -> svuint64_t {
9567 static_assert_range!(IMM_INDEX, 0..=1);
9568 unsafe {
9569 svmls_lane_s64::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
9570 }
9571}
#[doc = "Multiply-subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlslb, IMM_INDEX = 0))]
pub fn svmlslb_lane_s32<const IMM_INDEX: i32>(
    op1: svint32_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint32_t {
    // IMM_INDEX selects the lane of `op3` used as the multiplier; range
    // (0..=7) enforced at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    // Direct binding to the LLVM intrinsic named in `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.smlslb.lane.nxv4i32"
        )]
        fn _svmlslb_lane_s32(
            op1: svint32_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlslb_lane_s32(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlslb, IMM_INDEX = 0))]
pub fn svmlslb_lane_s64<const IMM_INDEX: i32>(
    op1: svint64_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint64_t {
    // IMM_INDEX selects the lane of `op3` used as the multiplier; range
    // (0..=3) enforced at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    // Direct binding to the LLVM intrinsic named in `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.smlslb.lane.nxv2i64"
        )]
        fn _svmlslb_lane_s64(
            op1: svint64_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlslb_lane_s64(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlslb, IMM_INDEX = 0))]
pub fn svmlslb_lane_u32<const IMM_INDEX: i32>(
    op1: svuint32_t,
    op2: svuint16_t,
    op3: svuint16_t,
) -> svuint32_t {
    // IMM_INDEX selects the lane of `op3` used as the multiplier; range
    // (0..=7) enforced at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    // The LLVM intrinsic is declared over signed vector types; the unsigned
    // operands are reinterpreted on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.umlslb.lane.nxv4i32"
        )]
        fn _svmlslb_lane_u32(
            op1: svint32_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe {
        _svmlslb_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
            .as_unsigned()
    }
}
#[doc = "Multiply-subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlslb, IMM_INDEX = 0))]
pub fn svmlslb_lane_u64<const IMM_INDEX: i32>(
    op1: svuint64_t,
    op2: svuint32_t,
    op3: svuint32_t,
) -> svuint64_t {
    // IMM_INDEX selects the lane of `op3` used as the multiplier; range
    // (0..=3) enforced at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    // The LLVM intrinsic is declared over signed vector types; the unsigned
    // operands are reinterpreted on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.umlslb.lane.nxv2i64"
        )]
        fn _svmlslb_lane_u64(
            op1: svint64_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe {
        _svmlslb_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
            .as_unsigned()
    }
}
#[doc = "Multiply-subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlslb))]
pub fn svmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
    // Direct binding to the LLVM intrinsic named in `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv8i16")]
        fn _svmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlslb_s16(op1, op2, op3) }
}
9695#[doc = "Multiply-subtract long (bottom)"]
9696#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s16])"]
9697#[inline(always)]
9698#[target_feature(enable = "sve,sve2")]
9699#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9700#[cfg_attr(test, assert_instr(smlslb))]
9701pub fn svmlslb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
9702 svmlslb_s16(op1, op2, svdup_n_s8(op3))
9703}
#[doc = "Multiply-subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlslb))]
pub fn svmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
    // Direct binding to the LLVM intrinsic named in `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv4i32")]
        fn _svmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlslb_s32(op1, op2, op3) }
}
9717#[doc = "Multiply-subtract long (bottom)"]
9718#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s32])"]
9719#[inline(always)]
9720#[target_feature(enable = "sve,sve2")]
9721#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9722#[cfg_attr(test, assert_instr(smlslb))]
9723pub fn svmlslb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
9724 svmlslb_s32(op1, op2, svdup_n_s16(op3))
9725}
#[doc = "Multiply-subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlslb))]
pub fn svmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
    // Direct binding to the LLVM intrinsic named in `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv2i64")]
        fn _svmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlslb_s64(op1, op2, op3) }
}
9739#[doc = "Multiply-subtract long (bottom)"]
9740#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s64])"]
9741#[inline(always)]
9742#[target_feature(enable = "sve,sve2")]
9743#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9744#[cfg_attr(test, assert_instr(smlslb))]
9745pub fn svmlslb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
9746 svmlslb_s64(op1, op2, svdup_n_s32(op3))
9747}
#[doc = "Multiply-subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlslb))]
pub fn svmlslb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t {
    // The LLVM intrinsic is declared over signed vector types; the unsigned
    // operands are reinterpreted on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv8i16")]
        fn _svmlslb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlslb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
9761#[doc = "Multiply-subtract long (bottom)"]
9762#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u16])"]
9763#[inline(always)]
9764#[target_feature(enable = "sve,sve2")]
9765#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9766#[cfg_attr(test, assert_instr(umlslb))]
9767pub fn svmlslb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t {
9768 svmlslb_u16(op1, op2, svdup_n_u8(op3))
9769}
#[doc = "Multiply-subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlslb))]
pub fn svmlslb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t {
    // The LLVM intrinsic is declared over signed vector types; the unsigned
    // operands are reinterpreted on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv4i32")]
        fn _svmlslb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlslb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
9783#[doc = "Multiply-subtract long (bottom)"]
9784#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u32])"]
9785#[inline(always)]
9786#[target_feature(enable = "sve,sve2")]
9787#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9788#[cfg_attr(test, assert_instr(umlslb))]
9789pub fn svmlslb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t {
9790 svmlslb_u32(op1, op2, svdup_n_u16(op3))
9791}
#[doc = "Multiply-subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlslb))]
pub fn svmlslb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t {
    // The LLVM intrinsic is declared over signed vector types; the unsigned
    // operands are reinterpreted on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv2i64")]
        fn _svmlslb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlslb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
9805#[doc = "Multiply-subtract long (bottom)"]
9806#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u64])"]
9807#[inline(always)]
9808#[target_feature(enable = "sve,sve2")]
9809#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9810#[cfg_attr(test, assert_instr(umlslb))]
9811pub fn svmlslb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t {
9812 svmlslb_u64(op1, op2, svdup_n_u32(op3))
9813}
#[doc = "Multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlslt, IMM_INDEX = 0))]
pub fn svmlslt_lane_s32<const IMM_INDEX: i32>(
    op1: svint32_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint32_t {
    // IMM_INDEX selects the lane of `op3` used as the multiplier; range
    // (0..=7) enforced at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    // Direct binding to the LLVM intrinsic named in `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.smlslt.lane.nxv4i32"
        )]
        fn _svmlslt_lane_s32(
            op1: svint32_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlslt_lane_s32(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlslt, IMM_INDEX = 0))]
pub fn svmlslt_lane_s64<const IMM_INDEX: i32>(
    op1: svint64_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint64_t {
    // IMM_INDEX selects the lane of `op3` used as the multiplier; range
    // (0..=3) enforced at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    // Direct binding to the LLVM intrinsic named in `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.smlslt.lane.nxv2i64"
        )]
        fn _svmlslt_lane_s64(
            op1: svint64_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlslt_lane_s64(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlslt, IMM_INDEX = 0))]
pub fn svmlslt_lane_u32<const IMM_INDEX: i32>(
    op1: svuint32_t,
    op2: svuint16_t,
    op3: svuint16_t,
) -> svuint32_t {
    // IMM_INDEX selects the lane of `op3` used as the multiplier; range
    // (0..=7) enforced at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    // The LLVM intrinsic is declared over signed vector types; the unsigned
    // operands are reinterpreted on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.umlslt.lane.nxv4i32"
        )]
        fn _svmlslt_lane_u32(
            op1: svint32_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe {
        _svmlslt_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
            .as_unsigned()
    }
}
#[doc = "Multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlslt, IMM_INDEX = 0))]
pub fn svmlslt_lane_u64<const IMM_INDEX: i32>(
    op1: svuint64_t,
    op2: svuint32_t,
    op3: svuint32_t,
) -> svuint64_t {
    // IMM_INDEX selects the lane of `op3` used as the multiplier; range
    // (0..=3) enforced at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    // The LLVM intrinsic is declared over signed vector types; the unsigned
    // operands are reinterpreted on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.umlslt.lane.nxv2i64"
        )]
        fn _svmlslt_lane_u64(
            op1: svint64_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe {
        _svmlslt_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
            .as_unsigned()
    }
}
#[doc = "Multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlslt))]
pub fn svmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
    // Direct binding to the LLVM intrinsic named in `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv8i16")]
        fn _svmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlslt_s16(op1, op2, op3) }
}
9937#[doc = "Multiply-subtract long (top)"]
9938#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s16])"]
9939#[inline(always)]
9940#[target_feature(enable = "sve,sve2")]
9941#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9942#[cfg_attr(test, assert_instr(smlslt))]
9943pub fn svmlslt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
9944 svmlslt_s16(op1, op2, svdup_n_s8(op3))
9945}
#[doc = "Multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlslt))]
pub fn svmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
    // Direct binding to the LLVM intrinsic named in `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv4i32")]
        fn _svmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlslt_s32(op1, op2, op3) }
}
9959#[doc = "Multiply-subtract long (top)"]
9960#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s32])"]
9961#[inline(always)]
9962#[target_feature(enable = "sve,sve2")]
9963#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9964#[cfg_attr(test, assert_instr(smlslt))]
9965pub fn svmlslt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
9966 svmlslt_s32(op1, op2, svdup_n_s16(op3))
9967}
#[doc = "Multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smlslt))]
pub fn svmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
    // Direct binding to the LLVM intrinsic named in `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv2i64")]
        fn _svmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlslt_s64(op1, op2, op3) }
}
9981#[doc = "Multiply-subtract long (top)"]
9982#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s64])"]
9983#[inline(always)]
9984#[target_feature(enable = "sve,sve2")]
9985#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9986#[cfg_attr(test, assert_instr(smlslt))]
9987pub fn svmlslt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
9988 svmlslt_s64(op1, op2, svdup_n_s32(op3))
9989}
#[doc = "Multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlslt))]
pub fn svmlslt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t {
    // The LLVM intrinsic is declared over signed vector types; the unsigned
    // operands are reinterpreted on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv8i16")]
        fn _svmlslt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlslt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
10003#[doc = "Multiply-subtract long (top)"]
10004#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u16])"]
10005#[inline(always)]
10006#[target_feature(enable = "sve,sve2")]
10007#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
10008#[cfg_attr(test, assert_instr(umlslt))]
10009pub fn svmlslt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t {
10010 svmlslt_u16(op1, op2, svdup_n_u8(op3))
10011}
#[doc = "Multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlslt))]
pub fn svmlslt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t {
    // The LLVM intrinsic is declared over signed vector types; the unsigned
    // operands are reinterpreted on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv4i32")]
        fn _svmlslt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
    }
    // SAFETY: the required `sve,sve2` features are enabled via `#[target_feature]`.
    unsafe { _svmlslt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlslt))]
pub fn svmlslt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t {
    // Splat the scalar operand across all lanes and defer to the vector form.
    svmlslt_u32(op1, op2, svdup_n_u16(op3))
}
#[doc = "Multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlslt))]
pub fn svmlslt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t {
    // Binding to the LLVM SVE2 `umlslt` intrinsic for 64-bit result lanes;
    // `as_signed`/`as_unsigned` are zero-cost bit reinterpretations.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv2i64")]
        fn _svmlslt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmlslt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umlslt))]
pub fn svmlslt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t {
    // Splat the scalar operand across all lanes and defer to the vector form.
    svmlslt_u64(op1, op2, svdup_n_u32(op3))
}
#[doc = "Move long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllb))]
pub fn svmovlb_s16(op: svint8_t) -> svint16_t {
    // Widening move is implemented as a shift-left-long by zero (SSHLLB #0).
    svshllb_n_s16::<0>(op)
}
#[doc = "Move long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllb))]
pub fn svmovlb_s32(op: svint16_t) -> svint32_t {
    // Widening move is implemented as a shift-left-long by zero (SSHLLB #0).
    svshllb_n_s32::<0>(op)
}
#[doc = "Move long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllb))]
pub fn svmovlb_s64(op: svint32_t) -> svint64_t {
    // Widening move is implemented as a shift-left-long by zero (SSHLLB #0).
    svshllb_n_s64::<0>(op)
}
#[doc = "Move long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllb))]
pub fn svmovlb_u16(op: svuint8_t) -> svuint16_t {
    // Widening move is implemented as a shift-left-long by zero (USHLLB #0).
    svshllb_n_u16::<0>(op)
}
#[doc = "Move long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllb))]
pub fn svmovlb_u32(op: svuint16_t) -> svuint32_t {
    // Widening move is implemented as a shift-left-long by zero (USHLLB #0).
    svshllb_n_u32::<0>(op)
}
#[doc = "Move long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllb))]
pub fn svmovlb_u64(op: svuint32_t) -> svuint64_t {
    // Widening move is implemented as a shift-left-long by zero (USHLLB #0).
    svshllb_n_u64::<0>(op)
}
#[doc = "Move long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllt))]
pub fn svmovlt_s16(op: svint8_t) -> svint16_t {
    // Widening move is implemented as a shift-left-long by zero (SSHLLT #0).
    svshllt_n_s16::<0>(op)
}
#[doc = "Move long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllt))]
pub fn svmovlt_s32(op: svint16_t) -> svint32_t {
    // Widening move is implemented as a shift-left-long by zero (SSHLLT #0).
    svshllt_n_s32::<0>(op)
}
#[doc = "Move long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllt))]
pub fn svmovlt_s64(op: svint32_t) -> svint64_t {
    // Widening move is implemented as a shift-left-long by zero (SSHLLT #0).
    svshllt_n_s64::<0>(op)
}
#[doc = "Move long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllt))]
pub fn svmovlt_u16(op: svuint8_t) -> svuint16_t {
    // Widening move is implemented as a shift-left-long by zero (USHLLT #0).
    svshllt_n_u16::<0>(op)
}
#[doc = "Move long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllt))]
pub fn svmovlt_u32(op: svuint16_t) -> svuint32_t {
    // Widening move is implemented as a shift-left-long by zero (USHLLT #0).
    svshllt_n_u32::<0>(op)
}
#[doc = "Move long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllt))]
pub fn svmovlt_u64(op: svuint32_t) -> svuint64_t {
    // Widening move is implemented as a shift-left-long by zero (USHLLT #0).
    svshllt_n_u64::<0>(op)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul, IMM_INDEX = 0))]
pub fn svmul_lane_f32<const IMM_INDEX: i32>(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // IMM_INDEX selects which 32-bit lane of `op2` to multiply by;
    // the valid range (0..=3) is enforced at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.fmul.lane.nxv4f32"
        )]
        fn _svmul_lane_f32(op1: svfloat32_t, op2: svfloat32_t, imm_index: i32) -> svfloat32_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmul_lane_f32(op1, op2, IMM_INDEX) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul, IMM_INDEX = 0))]
pub fn svmul_lane_f64<const IMM_INDEX: i32>(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // IMM_INDEX selects which 64-bit lane of `op2` to multiply by;
    // the valid range (0..=1) is enforced at compile time.
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.fmul.lane.nxv2f64"
        )]
        fn _svmul_lane_f64(op1: svfloat64_t, op2: svfloat64_t, imm_index: i32) -> svfloat64_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmul_lane_f64(op1, op2, IMM_INDEX) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))]
pub fn svmul_lane_s16<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // IMM_INDEX selects which 16-bit lane of `op2` to multiply by;
    // the valid range (0..=7) is enforced at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.mul.lane.nxv8i16"
        )]
        fn _svmul_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmul_lane_s16(op1, op2, IMM_INDEX) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))]
pub fn svmul_lane_s32<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // IMM_INDEX selects which 32-bit lane of `op2` to multiply by;
    // the valid range (0..=3) is enforced at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.mul.lane.nxv4i32"
        )]
        fn _svmul_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmul_lane_s32(op1, op2, IMM_INDEX) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))]
pub fn svmul_lane_s64<const IMM_INDEX: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // IMM_INDEX selects which 64-bit lane of `op2` to multiply by;
    // the valid range (0..=1) is enforced at compile time.
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.mul.lane.nxv2i64"
        )]
        fn _svmul_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmul_lane_s64(op1, op2, IMM_INDEX) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))]
pub fn svmul_lane_u16<const IMM_INDEX: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    static_assert_range!(IMM_INDEX, 0..=7);
    // Integer lane multiply is sign-agnostic: reinterpret as signed, reuse
    // the signed intrinsic, then reinterpret the result back (bits only).
    unsafe { svmul_lane_s16::<IMM_INDEX>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))]
pub fn svmul_lane_u32<const IMM_INDEX: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    // Integer lane multiply is sign-agnostic: reinterpret as signed, reuse
    // the signed intrinsic, then reinterpret the result back (bits only).
    unsafe { svmul_lane_s32::<IMM_INDEX>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))]
pub fn svmul_lane_u64<const IMM_INDEX: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    // Integer lane multiply is sign-agnostic: reinterpret as signed, reuse
    // the signed intrinsic, then reinterpret the result back (bits only).
    unsafe { svmul_lane_s64::<IMM_INDEX>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(
    all(test, not(target_env = "msvc")),
    assert_instr(smullb, IMM_INDEX = 0)
)]
pub fn svmullb_lane_s32<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // IMM_INDEX selects the 16-bit lane of `op2`; range checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.smullb.lane.nxv4i32"
        )]
        fn _svmullb_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullb_lane_s32(op1, op2, IMM_INDEX) }
}
#[doc = "Multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(
    all(test, not(target_env = "msvc")),
    assert_instr(smullb, IMM_INDEX = 0)
)]
pub fn svmullb_lane_s64<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint64_t {
    // IMM_INDEX selects the 32-bit lane of `op2`; range checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.smullb.lane.nxv2i64"
        )]
        fn _svmullb_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullb_lane_s64(op1, op2, IMM_INDEX) }
}
#[doc = "Multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(
    all(test, not(target_env = "msvc")),
    assert_instr(umullb, IMM_INDEX = 0)
)]
pub fn svmullb_lane_u32<const IMM_INDEX: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
    // IMM_INDEX selects the 16-bit lane of `op2`; range checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    // LLVM declares the intrinsic over signed lanes; the casts reinterpret bits only.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.umullb.lane.nxv4i32"
        )]
        fn _svmullb_lane_u32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullb_lane_u32(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() }
}
#[doc = "Multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(
    all(test, not(target_env = "msvc")),
    assert_instr(umullb, IMM_INDEX = 0)
)]
pub fn svmullb_lane_u64<const IMM_INDEX: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
    // IMM_INDEX selects the 32-bit lane of `op2`; range checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    // LLVM declares the intrinsic over signed lanes; the casts reinterpret bits only.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.umullb.lane.nxv2i64"
        )]
        fn _svmullb_lane_u64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullb_lane_u64(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() }
}
#[doc = "Multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smullb))]
pub fn svmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
    // Binding to the LLVM SVE2 `smullb` intrinsic (even-lane widening multiply).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv8i16")]
        fn _svmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullb_s16(op1, op2) }
}
#[doc = "Multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smullb))]
pub fn svmullb_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
    // Splat the scalar operand across all lanes and defer to the vector form.
    svmullb_s16(op1, svdup_n_s8(op2))
}
#[doc = "Multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smullb))]
pub fn svmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // Binding to the LLVM SVE2 `smullb` intrinsic (even-lane widening multiply).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv4i32")]
        fn _svmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullb_s32(op1, op2) }
}
#[doc = "Multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smullb))]
pub fn svmullb_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
    // Splat the scalar operand across all lanes and defer to the vector form.
    svmullb_s32(op1, svdup_n_s16(op2))
}
#[doc = "Multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smullb))]
pub fn svmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
    // Binding to the LLVM SVE2 `smullb` intrinsic (even-lane widening multiply).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv2i64")]
        fn _svmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullb_s64(op1, op2) }
}
#[doc = "Multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smullb))]
pub fn svmullb_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
    // Splat the scalar operand across all lanes and defer to the vector form.
    svmullb_s64(op1, svdup_n_s32(op2))
}
#[doc = "Multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umullb))]
pub fn svmullb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
    // Binding to the LLVM SVE2 `umullb` intrinsic; LLVM declares it over
    // signed lanes, so the casts reinterpret bits only.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv8i16")]
        fn _svmullb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umullb))]
pub fn svmullb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
    // Splat the scalar operand across all lanes and defer to the vector form.
    svmullb_u16(op1, svdup_n_u8(op2))
}
#[doc = "Multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umullb))]
pub fn svmullb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
    // Binding to the LLVM SVE2 `umullb` intrinsic; LLVM declares it over
    // signed lanes, so the casts reinterpret bits only.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv4i32")]
        fn _svmullb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umullb))]
pub fn svmullb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t {
    // Splat the scalar operand across all lanes and defer to the vector form.
    svmullb_u32(op1, svdup_n_u16(op2))
}
#[doc = "Multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umullb))]
pub fn svmullb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
    // Binding to the LLVM SVE2 `umullb` intrinsic; LLVM declares it over
    // signed lanes, so the casts reinterpret bits only.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv2i64")]
        fn _svmullb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umullb))]
pub fn svmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
    // Splat the scalar operand across all lanes and defer to the vector form.
    svmullb_u64(op1, svdup_n_u32(op2))
}
#[doc = "Multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(
    all(test, not(target_env = "msvc")),
    assert_instr(smullt, IMM_INDEX = 0)
)]
pub fn svmullt_lane_s32<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // IMM_INDEX selects the 16-bit lane of `op2`; range checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.smullt.lane.nxv4i32"
        )]
        fn _svmullt_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullt_lane_s32(op1, op2, IMM_INDEX) }
}
#[doc = "Multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(
    all(test, not(target_env = "msvc")),
    assert_instr(smullt, IMM_INDEX = 0)
)]
pub fn svmullt_lane_s64<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint64_t {
    // IMM_INDEX selects the 32-bit lane of `op2`; range checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.smullt.lane.nxv2i64"
        )]
        fn _svmullt_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullt_lane_s64(op1, op2, IMM_INDEX) }
}
#[doc = "Multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(
    all(test, not(target_env = "msvc")),
    assert_instr(umullt, IMM_INDEX = 0)
)]
pub fn svmullt_lane_u32<const IMM_INDEX: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
    // IMM_INDEX selects the 16-bit lane of `op2`; range checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    // LLVM declares the intrinsic over signed lanes; the casts reinterpret bits only.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.umullt.lane.nxv4i32"
        )]
        fn _svmullt_lane_u32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullt_lane_u32(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() }
}
#[doc = "Multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(
    all(test, not(target_env = "msvc")),
    assert_instr(umullt, IMM_INDEX = 0)
)]
pub fn svmullt_lane_u64<const IMM_INDEX: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
    // IMM_INDEX selects the 32-bit lane of `op2`; range checked at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    // LLVM declares the intrinsic over signed lanes; the casts reinterpret bits only.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.umullt.lane.nxv2i64"
        )]
        fn _svmullt_lane_u64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullt_lane_u64(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() }
}
#[doc = "Multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))]
pub fn svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
    // Binding to the LLVM SVE2 `smullt` intrinsic (odd-lane widening multiply).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv8i16")]
        fn _svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullt_s16(op1, op2) }
}
#[doc = "Multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))]
pub fn svmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
    // Splat the scalar operand across all lanes and defer to the vector form.
    svmullt_s16(op1, svdup_n_s8(op2))
}
#[doc = "Multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))]
pub fn svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // Binding to the LLVM SVE2 `smullt` intrinsic (odd-lane widening multiply).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv4i32")]
        fn _svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullt_s32(op1, op2) }
}
#[doc = "Multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))]
pub fn svmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
    // Splat the scalar operand across all lanes and defer to the vector form.
    svmullt_s32(op1, svdup_n_s16(op2))
}
#[doc = "Multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))]
pub fn svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
    // Binding to the LLVM SVE2 `smullt` intrinsic (odd-lane widening multiply).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv2i64")]
        fn _svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullt_s64(op1, op2) }
}
#[doc = "Multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))]
pub fn svmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
    // Splat the scalar operand across all lanes and defer to the vector form.
    svmullt_s64(op1, svdup_n_s32(op2))
}
#[doc = "Multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))]
pub fn svmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
    // Binding to the LLVM SVE2 `umullt` intrinsic; LLVM declares it over
    // signed lanes, so the casts reinterpret bits only.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv8i16")]
        fn _svmullt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))]
pub fn svmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
    // Splat the scalar operand across all lanes and defer to the vector form.
    svmullt_u16(op1, svdup_n_u8(op2))
}
#[doc = "Multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))]
pub fn svmullt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
    // Binding to the LLVM SVE2 `umullt` intrinsic; LLVM declares it over
    // signed lanes, so the casts reinterpret bits only.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv4i32")]
        fn _svmullt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: pure vector operation; `sve2` is guaranteed by `#[target_feature]`.
    unsafe { _svmullt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))]
pub fn svmullt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t {
    // Scalar form: splat `op2` across a vector and reuse the vector variant.
    svmullt_u32(op1, svdup_n_u16(op2))
}
#[doc = "Multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))]
pub fn svmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv2i64")]
        fn _svmullt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: `sve2` is enabled by `#[target_feature]`; bit-preserving
    // signed/unsigned reinterprets bridge to the signed-typed LLVM binding.
    unsafe { _svmullt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))]
pub fn svmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
    // Scalar form: splat `op2` across a vector and reuse the vector variant.
    svmullt_u64(op1, svdup_n_u32(op2))
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nbsl))]
pub fn svnbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv16i8")]
        fn _svnbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    // SAFETY: `sve2` is enabled by `#[target_feature]`; operands are forwarded
    // unchanged to the LLVM intrinsic named in `link_name`.
    unsafe { _svnbsl_s8(op1, op2, op3) }
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nbsl))]
pub fn svnbsl_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    // Scalar form: splat `op3` across a vector and reuse the vector variant.
    svnbsl_s8(op1, op2, svdup_n_s8(op3))
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nbsl))]
pub fn svnbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv8i16")]
        fn _svnbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    // SAFETY: `sve2` is enabled by `#[target_feature]`; operands are forwarded
    // unchanged to the LLVM intrinsic named in `link_name`.
    unsafe { _svnbsl_s16(op1, op2, op3) }
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nbsl))]
pub fn svnbsl_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    // Scalar form: splat `op3` across a vector and reuse the vector variant.
    svnbsl_s16(op1, op2, svdup_n_s16(op3))
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nbsl))]
pub fn svnbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv4i32")]
        fn _svnbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: `sve2` is enabled by `#[target_feature]`; operands are forwarded
    // unchanged to the LLVM intrinsic named in `link_name`.
    unsafe { _svnbsl_s32(op1, op2, op3) }
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nbsl))]
pub fn svnbsl_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    // Scalar form: splat `op3` across a vector and reuse the vector variant.
    svnbsl_s32(op1, op2, svdup_n_s32(op3))
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nbsl))]
pub fn svnbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv2i64")]
        fn _svnbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: `sve2` is enabled by `#[target_feature]`; operands are forwarded
    // unchanged to the LLVM intrinsic named in `link_name`.
    unsafe { _svnbsl_s64(op1, op2, op3) }
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nbsl))]
pub fn svnbsl_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    // Scalar form: splat `op3` across a vector and reuse the vector variant.
    svnbsl_s64(op1, op2, svdup_n_s64(op3))
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nbsl))]
pub fn svnbsl_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    // SAFETY: bit-preserving reinterpret; the operation is bitwise, so the
    // signed variant computes the identical bit pattern.
    unsafe { svnbsl_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nbsl))]
pub fn svnbsl_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    // Scalar form: splat `op3` across a vector and reuse the vector variant.
    svnbsl_u8(op1, op2, svdup_n_u8(op3))
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nbsl))]
pub fn svnbsl_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    // SAFETY: bit-preserving reinterpret; the operation is bitwise, so the
    // signed variant computes the identical bit pattern.
    unsafe { svnbsl_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nbsl))]
pub fn svnbsl_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    // Scalar form: splat `op3` across a vector and reuse the vector variant.
    svnbsl_u16(op1, op2, svdup_n_u16(op3))
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nbsl))]
pub fn svnbsl_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    // SAFETY: bit-preserving reinterpret; the operation is bitwise, so the
    // signed variant computes the identical bit pattern.
    unsafe { svnbsl_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nbsl))]
pub fn svnbsl_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    // Scalar form: splat `op3` across a vector and reuse the vector variant.
    svnbsl_u32(op1, op2, svdup_n_u32(op3))
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nbsl))]
pub fn svnbsl_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    // SAFETY: bit-preserving reinterpret; the operation is bitwise, so the
    // signed variant computes the identical bit pattern.
    unsafe { svnbsl_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Bitwise select"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nbsl))]
pub fn svnbsl_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    // Scalar form: splat `op3` across a vector and reuse the vector variant.
    svnbsl_u64(op1, op2, svdup_n_u64(op3))
}
#[doc = "Detect no matching elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nmatch))]
pub fn svnmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nmatch.nxv16i8")]
        fn _svnmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
    }
    // SAFETY: `sve2` is enabled by `#[target_feature]`; for 8-bit lanes the
    // generic predicate type matches the LLVM signature, so no predicate
    // conversion is needed.
    unsafe { _svnmatch_s8(pg, op1, op2) }
}
#[doc = "Detect no matching elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nmatch))]
pub fn svnmatch_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nmatch.nxv8i16")]
        fn _svnmatch_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
    }
    // SAFETY: `sve2` is enabled by `#[target_feature]`; `sve_into` converts
    // between the generic predicate type and the 16-bit-lane-specific
    // predicate type (`svbool8_t`) used by the LLVM signature, both ways.
    unsafe { _svnmatch_s16(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Detect no matching elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nmatch))]
pub fn svnmatch_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
    // SAFETY: bit-preserving reinterpret of the operands; equality comparison
    // is sign-agnostic, so the signed variant yields the same predicate.
    unsafe { svnmatch_s8(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Detect no matching elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nmatch))]
pub fn svnmatch_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
    // SAFETY: bit-preserving reinterpret of the operands; equality comparison
    // is sign-agnostic, so the signed variant yields the same predicate.
    unsafe { svnmatch_s16(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Polynomial multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmul[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmul))]
pub fn svpmul_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pmul.nxv16i8")]
        fn _svpmul_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: `sve2` is enabled by `#[target_feature]`; bit-preserving
    // signed/unsigned reinterprets bridge to the signed-typed LLVM binding.
    unsafe { _svpmul_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Polynomial multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmul[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmul))]
pub fn svpmul_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar form: splat `op2` across a vector and reuse the vector variant.
    svpmul_u8(op1, svdup_n_u8(op2))
}
#[doc = "Polynomial multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullb))]
pub fn svpmullb_pair_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.pmullb.pair.nxv16i8"
        )]
        fn _svpmullb_pair_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: required features are enabled by `#[target_feature]`;
    // bit-preserving signed/unsigned reinterprets bridge to the signed-typed
    // LLVM binding.
    unsafe { _svpmullb_pair_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Polynomial multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullb))]
pub fn svpmullb_pair_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar form: splat `op2` across a vector and reuse the vector variant.
    svpmullb_pair_u8(op1, svdup_n_u8(op2))
}
#[doc = "Polynomial multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullb))]
pub fn svpmullb_pair_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.pmullb.pair.nxv4i32"
        )]
        fn _svpmullb_pair_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: required features are enabled by `#[target_feature]`;
    // bit-preserving signed/unsigned reinterprets bridge to the signed-typed
    // LLVM binding.
    unsafe { _svpmullb_pair_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Polynomial multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullb))]
pub fn svpmullb_pair_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar form: splat `op2` across a vector and reuse the vector variant.
    svpmullb_pair_u32(op1, svdup_n_u32(op2))
}
#[doc = "Polynomial multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullb))]
pub fn svpmullb_pair_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.pmullb.pair.nxv2i64"
        )]
        fn _svpmullb_pair_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: required features are enabled by `#[target_feature]`;
    // bit-preserving signed/unsigned reinterprets bridge to the signed-typed
    // LLVM binding.
    unsafe { _svpmullb_pair_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Polynomial multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullb))]
pub fn svpmullb_pair_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar form: splat `op2` across a vector and reuse the vector variant.
    svpmullb_pair_u64(op1, svdup_n_u64(op2))
}
#[doc = "Polynomial multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullb))]
pub fn svpmullb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
    // SAFETY: the `pair` form produces the same total bit width; the raw bits
    // are reinterpreted as 16-bit lanes (same-size transmute of scalable
    // vectors) to expose the widened result type.
    unsafe { crate::intrinsics::transmute_unchecked(svpmullb_pair_u8(op1, op2)) }
}
#[doc = "Polynomial multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullb))]
pub fn svpmullb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
    // Scalar form: splat `op2` across a vector and reuse the vector variant.
    svpmullb_u16(op1, svdup_n_u8(op2))
}
#[doc = "Polynomial multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullb))]
pub fn svpmullb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
    // SAFETY: the `pair` form produces the same total bit width; the raw bits
    // are reinterpreted as 64-bit lanes (same-size transmute of scalable
    // vectors) to expose the widened result type.
    unsafe { crate::intrinsics::transmute_unchecked(svpmullb_pair_u32(op1, op2)) }
}
#[doc = "Polynomial multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullb))]
pub fn svpmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
    // Scalar form: splat `op2` across a vector and reuse the vector variant.
    svpmullb_u64(op1, svdup_n_u32(op2))
}
#[doc = "Polynomial multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullt))]
pub fn svpmullt_pair_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.pmullt.pair.nxv16i8"
        )]
        fn _svpmullt_pair_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: required features are enabled by `#[target_feature]`;
    // bit-preserving signed/unsigned reinterprets bridge to the signed-typed
    // LLVM binding.
    unsafe { _svpmullt_pair_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Polynomial multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullt))]
pub fn svpmullt_pair_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar form: splat `op2` across a vector and reuse the vector variant.
    svpmullt_pair_u8(op1, svdup_n_u8(op2))
}
#[doc = "Polynomial multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullt))]
pub fn svpmullt_pair_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.pmullt.pair.nxv4i32"
        )]
        fn _svpmullt_pair_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: required features are enabled by `#[target_feature]`;
    // bit-preserving signed/unsigned reinterprets bridge to the signed-typed
    // LLVM binding.
    unsafe { _svpmullt_pair_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Polynomial multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullt))]
pub fn svpmullt_pair_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar form: splat `op2` across a vector and reuse the vector variant.
    svpmullt_pair_u32(op1, svdup_n_u32(op2))
}
#[doc = "Polynomial multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullt))]
pub fn svpmullt_pair_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.pmullt.pair.nxv2i64"
        )]
        fn _svpmullt_pair_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: required features are enabled by `#[target_feature]`;
    // bit-preserving signed/unsigned reinterprets bridge to the signed-typed
    // LLVM binding.
    unsafe { _svpmullt_pair_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Polynomial multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullt))]
pub fn svpmullt_pair_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar form: splat `op2` across a vector and reuse the vector variant.
    svpmullt_pair_u64(op1, svdup_n_u64(op2))
}
#[doc = "Polynomial multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullt))]
pub fn svpmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
    // SAFETY: the `pair` form produces the same total bit width; the raw bits
    // are reinterpreted as 16-bit lanes (same-size transmute of scalable
    // vectors) to expose the widened result type.
    unsafe { crate::intrinsics::transmute_unchecked(svpmullt_pair_u8(op1, op2)) }
}
#[doc = "Polynomial multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullt))]
pub fn svpmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
    // Scalar form: splat `op2` across a vector and reuse the vector variant.
    svpmullt_u16(op1, svdup_n_u8(op2))
}
#[doc = "Polynomial multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullt))]
pub fn svpmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
    // SAFETY: the `pair` form produces the same total bit width; the raw bits
    // are reinterpreted as 64-bit lanes (same-size transmute of scalable
    // vectors) to expose the widened result type.
    unsafe { crate::intrinsics::transmute_unchecked(svpmullt_pair_u32(op1, op2)) }
}
#[doc = "Polynomial multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-aes")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pmullt))]
pub fn svpmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
    // Scalar form: splat `op2` across a vector and reuse the vector variant.
    svpmullt_u64(op1, svdup_n_u32(op2))
}
#[doc = "Saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqabs))]
pub fn svqabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv16i8")]
        fn _svqabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
    }
    // SAFETY: `sve2` is enabled by `#[target_feature]`; for 8-bit lanes the
    // generic predicate type matches the LLVM signature directly.
    unsafe { _svqabs_s8_m(inactive, pg, op) }
}
#[doc = "Saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqabs))]
pub fn svqabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t {
    // "Don't care" form: reuse the merging form with `op` itself as the
    // inactive-lane source.
    svqabs_s8_m(op, pg, op)
}
#[doc = "Saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqabs))]
pub fn svqabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t {
    // Zeroing form: pass an all-zeros vector as the inactive-lane source.
    svqabs_s8_m(svdup_n_s8(0), pg, op)
}
#[doc = "Saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqabs))]
pub fn svqabs_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv8i16")]
        fn _svqabs_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    // SAFETY: `sve2` is enabled by `#[target_feature]`; `sve_into` converts
    // the generic predicate to the 16-bit-lane predicate type (`svbool8_t`)
    // the LLVM signature expects.
    unsafe { _svqabs_s16_m(inactive, pg.sve_into(), op) }
}
#[doc = "Saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqabs))]
pub fn svqabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
    // "Don't care" form: reuse the merging form with `op` itself as the
    // inactive-lane source.
    svqabs_s16_m(op, pg, op)
}
#[doc = "Saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqabs))]
pub fn svqabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
    // Zeroing form: pass an all-zeros vector as the inactive-lane source.
    svqabs_s16_m(svdup_n_s16(0), pg, op)
}
#[doc = "Saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqabs))]
pub fn svqabs_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv4i32")]
        fn _svqabs_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    // SAFETY: `sve2` is enabled by `#[target_feature]`; `sve_into` converts
    // the generic predicate to the 32-bit-lane predicate type (`svbool4_t`)
    // the LLVM signature expects.
    unsafe { _svqabs_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqabs))]
pub fn svqabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
    // "Don't care" form: reuse the merging form with `op` itself as the
    // inactive-lane source.
    svqabs_s32_m(op, pg, op)
}
#[doc = "Saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqabs))]
pub fn svqabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
    // Zeroing form: pass an all-zeros vector as the inactive-lane source.
    svqabs_s32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqabs))]
pub fn svqabs_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv2i64")]
        fn _svqabs_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    // SAFETY: `sve2` is enabled by `#[target_feature]`; `sve_into` converts
    // the generic predicate to the 64-bit-lane predicate type (`svbool2_t`)
    // the LLVM signature expects.
    unsafe { _svqabs_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqabs))]
pub fn svqabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    // "Don't care" form: reuse the merging form with `op` itself as the
    // inactive-lane source.
    svqabs_s64_m(op, pg, op)
}
#[doc = "Saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqabs))]
pub fn svqabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
    // Zeroing form: pass an all-zeros vector as the inactive-lane source.
    svqabs_s64_m(svdup_n_s64(0), pg, op)
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn svqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // The nxv16i8 intrinsic takes a 16-lane predicate, which is svbool_t
    // itself, so no predicate conversion is needed here.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv16i8")]
        fn _svqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: the required `sve,sve2` target features are enabled on this
    // function, and the arguments match the declared intrinsic signature.
    unsafe { _svqadd_s8_m(pg, op1, op2) }
}
11288#[doc = "Saturating add"]
11289#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_m)"]
11290#[inline(always)]
11291#[target_feature(enable = "sve,sve2")]
11292#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11293#[cfg_attr(test, assert_instr(sqadd))]
11294pub fn svqadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
11295 svqadd_s8_m(pg, op1, svdup_n_s8(op2))
11296}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn svqadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // "Don't care" form: inactive lanes may hold any value, so the merging
    // form is a valid implementation.
    svqadd_s8_m(pg, op1, op2)
}
11306#[doc = "Saturating add"]
11307#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_x)"]
11308#[inline(always)]
11309#[target_feature(enable = "sve,sve2")]
11310#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11311#[cfg_attr(test, assert_instr(sqadd))]
11312pub fn svqadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
11313 svqadd_s8_x(pg, op1, svdup_n_s8(op2))
11314}
11315#[doc = "Saturating add"]
11316#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_z)"]
11317#[inline(always)]
11318#[target_feature(enable = "sve,sve2")]
11319#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11320#[cfg_attr(test, assert_instr(sqadd))]
11321pub fn svqadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
11322 svqadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
11323}
11324#[doc = "Saturating add"]
11325#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_z)"]
11326#[inline(always)]
11327#[target_feature(enable = "sve,sve2")]
11328#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11329#[cfg_attr(test, assert_instr(sqadd))]
11330pub fn svqadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
11331 svqadd_s8_z(pg, op1, svdup_n_s8(op2))
11332}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn svqadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // The nxv8i16 intrinsic takes an 8-lane predicate, hence `sve_into()` below.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv8i16")]
        fn _svqadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: the required `sve,sve2` target features are enabled on this
    // function, and the arguments match the declared intrinsic signature.
    unsafe { _svqadd_s16_m(pg.sve_into(), op1, op2) }
}
11346#[doc = "Saturating add"]
11347#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_m)"]
11348#[inline(always)]
11349#[target_feature(enable = "sve,sve2")]
11350#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11351#[cfg_attr(test, assert_instr(sqadd))]
11352pub fn svqadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
11353 svqadd_s16_m(pg, op1, svdup_n_s16(op2))
11354}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn svqadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // "Don't care" form: inactive lanes may hold any value, so the merging
    // form is a valid implementation.
    svqadd_s16_m(pg, op1, op2)
}
11364#[doc = "Saturating add"]
11365#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_x)"]
11366#[inline(always)]
11367#[target_feature(enable = "sve,sve2")]
11368#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11369#[cfg_attr(test, assert_instr(sqadd))]
11370pub fn svqadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
11371 svqadd_s16_x(pg, op1, svdup_n_s16(op2))
11372}
11373#[doc = "Saturating add"]
11374#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_z)"]
11375#[inline(always)]
11376#[target_feature(enable = "sve,sve2")]
11377#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11378#[cfg_attr(test, assert_instr(sqadd))]
11379pub fn svqadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
11380 svqadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
11381}
11382#[doc = "Saturating add"]
11383#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_z)"]
11384#[inline(always)]
11385#[target_feature(enable = "sve,sve2")]
11386#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11387#[cfg_attr(test, assert_instr(sqadd))]
11388pub fn svqadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
11389 svqadd_s16_z(pg, op1, svdup_n_s16(op2))
11390}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn svqadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // The nxv4i32 intrinsic takes a 4-lane predicate, hence `sve_into()` below.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv4i32")]
        fn _svqadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: the required `sve,sve2` target features are enabled on this
    // function, and the arguments match the declared intrinsic signature.
    unsafe { _svqadd_s32_m(pg.sve_into(), op1, op2) }
}
11404#[doc = "Saturating add"]
11405#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_m)"]
11406#[inline(always)]
11407#[target_feature(enable = "sve,sve2")]
11408#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11409#[cfg_attr(test, assert_instr(sqadd))]
11410pub fn svqadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
11411 svqadd_s32_m(pg, op1, svdup_n_s32(op2))
11412}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn svqadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // "Don't care" form: inactive lanes may hold any value, so the merging
    // form is a valid implementation.
    svqadd_s32_m(pg, op1, op2)
}
11422#[doc = "Saturating add"]
11423#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_x)"]
11424#[inline(always)]
11425#[target_feature(enable = "sve,sve2")]
11426#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11427#[cfg_attr(test, assert_instr(sqadd))]
11428pub fn svqadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
11429 svqadd_s32_x(pg, op1, svdup_n_s32(op2))
11430}
11431#[doc = "Saturating add"]
11432#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_z)"]
11433#[inline(always)]
11434#[target_feature(enable = "sve,sve2")]
11435#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11436#[cfg_attr(test, assert_instr(sqadd))]
11437pub fn svqadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
11438 svqadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
11439}
11440#[doc = "Saturating add"]
11441#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_z)"]
11442#[inline(always)]
11443#[target_feature(enable = "sve,sve2")]
11444#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11445#[cfg_attr(test, assert_instr(sqadd))]
11446pub fn svqadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
11447 svqadd_s32_z(pg, op1, svdup_n_s32(op2))
11448}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn svqadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // The nxv2i64 intrinsic takes a 2-lane predicate, hence `sve_into()` below.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv2i64")]
        fn _svqadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: the required `sve,sve2` target features are enabled on this
    // function, and the arguments match the declared intrinsic signature.
    unsafe { _svqadd_s64_m(pg.sve_into(), op1, op2) }
}
11462#[doc = "Saturating add"]
11463#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_m)"]
11464#[inline(always)]
11465#[target_feature(enable = "sve,sve2")]
11466#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11467#[cfg_attr(test, assert_instr(sqadd))]
11468pub fn svqadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
11469 svqadd_s64_m(pg, op1, svdup_n_s64(op2))
11470}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn svqadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // "Don't care" form: inactive lanes may hold any value, so the merging
    // form is a valid implementation.
    svqadd_s64_m(pg, op1, op2)
}
11480#[doc = "Saturating add"]
11481#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_x)"]
11482#[inline(always)]
11483#[target_feature(enable = "sve,sve2")]
11484#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11485#[cfg_attr(test, assert_instr(sqadd))]
11486pub fn svqadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
11487 svqadd_s64_x(pg, op1, svdup_n_s64(op2))
11488}
11489#[doc = "Saturating add"]
11490#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_z)"]
11491#[inline(always)]
11492#[target_feature(enable = "sve,sve2")]
11493#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11494#[cfg_attr(test, assert_instr(sqadd))]
11495pub fn svqadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
11496 svqadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
11497}
11498#[doc = "Saturating add"]
11499#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_z)"]
11500#[inline(always)]
11501#[target_feature(enable = "sve,sve2")]
11502#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11503#[cfg_attr(test, assert_instr(sqadd))]
11504pub fn svqadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
11505 svqadd_s64_z(pg, op1, svdup_n_s64(op2))
11506}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn svqadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // The intrinsic is declared over signed vectors, so the unsigned
    // operands are bit-reinterpreted for the call and back for the result.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv16i8")]
        fn _svqadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: the required `sve,sve2` target features are enabled on this
    // function, and the arguments match the declared intrinsic signature.
    unsafe { _svqadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
11520#[doc = "Saturating add"]
11521#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_m)"]
11522#[inline(always)]
11523#[target_feature(enable = "sve,sve2")]
11524#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11525#[cfg_attr(test, assert_instr(uqadd))]
11526pub fn svqadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
11527 svqadd_u8_m(pg, op1, svdup_n_u8(op2))
11528}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn svqadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // "Don't care" form: inactive lanes may hold any value, so the merging
    // form is a valid implementation.
    svqadd_u8_m(pg, op1, op2)
}
11538#[doc = "Saturating add"]
11539#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_x)"]
11540#[inline(always)]
11541#[target_feature(enable = "sve,sve2")]
11542#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11543#[cfg_attr(test, assert_instr(uqadd))]
11544pub fn svqadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
11545 svqadd_u8_x(pg, op1, svdup_n_u8(op2))
11546}
11547#[doc = "Saturating add"]
11548#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_z)"]
11549#[inline(always)]
11550#[target_feature(enable = "sve,sve2")]
11551#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11552#[cfg_attr(test, assert_instr(uqadd))]
11553pub fn svqadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
11554 svqadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
11555}
11556#[doc = "Saturating add"]
11557#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_z)"]
11558#[inline(always)]
11559#[target_feature(enable = "sve,sve2")]
11560#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11561#[cfg_attr(test, assert_instr(uqadd))]
11562pub fn svqadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
11563 svqadd_u8_z(pg, op1, svdup_n_u8(op2))
11564}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn svqadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // The nxv8i16 intrinsic takes an 8-lane predicate (`sve_into()`), and is
    // declared over signed vectors, so operands are bit-reinterpreted.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv8i16")]
        fn _svqadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: the required `sve,sve2` target features are enabled on this
    // function, and the arguments match the declared intrinsic signature.
    unsafe { _svqadd_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
11578#[doc = "Saturating add"]
11579#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_m)"]
11580#[inline(always)]
11581#[target_feature(enable = "sve,sve2")]
11582#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11583#[cfg_attr(test, assert_instr(uqadd))]
11584pub fn svqadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
11585 svqadd_u16_m(pg, op1, svdup_n_u16(op2))
11586}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn svqadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // "Don't care" form: inactive lanes may hold any value, so the merging
    // form is a valid implementation.
    svqadd_u16_m(pg, op1, op2)
}
11596#[doc = "Saturating add"]
11597#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_x)"]
11598#[inline(always)]
11599#[target_feature(enable = "sve,sve2")]
11600#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11601#[cfg_attr(test, assert_instr(uqadd))]
11602pub fn svqadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
11603 svqadd_u16_x(pg, op1, svdup_n_u16(op2))
11604}
11605#[doc = "Saturating add"]
11606#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_z)"]
11607#[inline(always)]
11608#[target_feature(enable = "sve,sve2")]
11609#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11610#[cfg_attr(test, assert_instr(uqadd))]
11611pub fn svqadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
11612 svqadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
11613}
11614#[doc = "Saturating add"]
11615#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_z)"]
11616#[inline(always)]
11617#[target_feature(enable = "sve,sve2")]
11618#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11619#[cfg_attr(test, assert_instr(uqadd))]
11620pub fn svqadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
11621 svqadd_u16_z(pg, op1, svdup_n_u16(op2))
11622}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn svqadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // The nxv4i32 intrinsic takes a 4-lane predicate (`sve_into()`), and is
    // declared over signed vectors, so operands are bit-reinterpreted.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv4i32")]
        fn _svqadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: the required `sve,sve2` target features are enabled on this
    // function, and the arguments match the declared intrinsic signature.
    unsafe { _svqadd_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
11636#[doc = "Saturating add"]
11637#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_m)"]
11638#[inline(always)]
11639#[target_feature(enable = "sve,sve2")]
11640#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11641#[cfg_attr(test, assert_instr(uqadd))]
11642pub fn svqadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
11643 svqadd_u32_m(pg, op1, svdup_n_u32(op2))
11644}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn svqadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // "Don't care" form: inactive lanes may hold any value, so the merging
    // form is a valid implementation.
    svqadd_u32_m(pg, op1, op2)
}
11654#[doc = "Saturating add"]
11655#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_x)"]
11656#[inline(always)]
11657#[target_feature(enable = "sve,sve2")]
11658#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11659#[cfg_attr(test, assert_instr(uqadd))]
11660pub fn svqadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
11661 svqadd_u32_x(pg, op1, svdup_n_u32(op2))
11662}
11663#[doc = "Saturating add"]
11664#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_z)"]
11665#[inline(always)]
11666#[target_feature(enable = "sve,sve2")]
11667#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11668#[cfg_attr(test, assert_instr(uqadd))]
11669pub fn svqadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
11670 svqadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
11671}
11672#[doc = "Saturating add"]
11673#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_z)"]
11674#[inline(always)]
11675#[target_feature(enable = "sve,sve2")]
11676#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11677#[cfg_attr(test, assert_instr(uqadd))]
11678pub fn svqadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
11679 svqadd_u32_z(pg, op1, svdup_n_u32(op2))
11680}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn svqadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // The nxv2i64 intrinsic takes a 2-lane predicate (`sve_into()`), and is
    // declared over signed vectors, so operands are bit-reinterpreted.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv2i64")]
        fn _svqadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: the required `sve,sve2` target features are enabled on this
    // function, and the arguments match the declared intrinsic signature.
    unsafe { _svqadd_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
11694#[doc = "Saturating add"]
11695#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_m)"]
11696#[inline(always)]
11697#[target_feature(enable = "sve,sve2")]
11698#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11699#[cfg_attr(test, assert_instr(uqadd))]
11700pub fn svqadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
11701 svqadd_u64_m(pg, op1, svdup_n_u64(op2))
11702}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn svqadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // "Don't care" form: inactive lanes may hold any value, so the merging
    // form is a valid implementation.
    svqadd_u64_m(pg, op1, op2)
}
11712#[doc = "Saturating add"]
11713#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_x)"]
11714#[inline(always)]
11715#[target_feature(enable = "sve,sve2")]
11716#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11717#[cfg_attr(test, assert_instr(uqadd))]
11718pub fn svqadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
11719 svqadd_u64_x(pg, op1, svdup_n_u64(op2))
11720}
11721#[doc = "Saturating add"]
11722#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_z)"]
11723#[inline(always)]
11724#[target_feature(enable = "sve,sve2")]
11725#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11726#[cfg_attr(test, assert_instr(uqadd))]
11727pub fn svqadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
11728 svqadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
11729}
11730#[doc = "Saturating add"]
11731#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_z)"]
11732#[inline(always)]
11733#[target_feature(enable = "sve,sve2")]
11734#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11735#[cfg_attr(test, assert_instr(uqadd))]
11736pub fn svqadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
11737 svqadd_u64_z(pg, op1, svdup_n_u64(op2))
11738}
#[doc = "Saturating complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))]
pub fn svqcadd_s8<const IMM_ROTATION: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Only rotations of 90 and 270 degrees are accepted by this intrinsic.
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqcadd.x.nxv16i8"
        )]
        fn _svqcadd_s8(op1: svint8_t, op2: svint8_t, imm_rotation: i32) -> svint8_t;
    }
    // SAFETY: the required `sve,sve2` target features are enabled on this
    // function, and IMM_ROTATION was validated above.
    unsafe { _svqcadd_s8(op1, op2, IMM_ROTATION) }
}
#[doc = "Saturating complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))]
pub fn svqcadd_s16<const IMM_ROTATION: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Only rotations of 90 and 270 degrees are accepted by this intrinsic.
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqcadd.x.nxv8i16"
        )]
        fn _svqcadd_s16(op1: svint16_t, op2: svint16_t, imm_rotation: i32) -> svint16_t;
    }
    // SAFETY: the required `sve,sve2` target features are enabled on this
    // function, and IMM_ROTATION was validated above.
    unsafe { _svqcadd_s16(op1, op2, IMM_ROTATION) }
}
#[doc = "Saturating complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))]
pub fn svqcadd_s32<const IMM_ROTATION: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Only rotations of 90 and 270 degrees are accepted by this intrinsic.
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqcadd.x.nxv4i32"
        )]
        fn _svqcadd_s32(op1: svint32_t, op2: svint32_t, imm_rotation: i32) -> svint32_t;
    }
    // SAFETY: the required `sve,sve2` target features are enabled on this
    // function, and IMM_ROTATION was validated above.
    unsafe { _svqcadd_s32(op1, op2, IMM_ROTATION) }
}
#[doc = "Saturating complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))]
pub fn svqcadd_s64<const IMM_ROTATION: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Only rotations of 90 and 270 degrees are accepted by this intrinsic.
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqcadd.x.nxv2i64"
        )]
        fn _svqcadd_s64(op1: svint64_t, op2: svint64_t, imm_rotation: i32) -> svint64_t;
    }
    // SAFETY: the required `sve,sve2` target features are enabled on this
    // function, and IMM_ROTATION was validated above.
    unsafe { _svqcadd_s64(op1, op2, IMM_ROTATION) }
}
#[doc = "Saturating doubling multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmlalb, IMM_INDEX = 0))]
pub fn svqdmlalb_lane_s32<const IMM_INDEX: i32>(
    op1: svint32_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint32_t {
    // Valid lane indices for this 16-bit-element form are 0..=7.
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmlalb.lane.nxv4i32"
        )]
        fn _svqdmlalb_lane_s32(
            op1: svint32_t,
            op2: svint16_t,
            op3: svint16_t,
            IMM_INDEX: i32,
        ) -> svint32_t;
    }
    // SAFETY: the required `sve,sve2` target features are enabled on this
    // function, and IMM_INDEX was validated above.
    unsafe { _svqdmlalb_lane_s32(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Saturating doubling multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmlalb, IMM_INDEX = 0))]
pub fn svqdmlalb_lane_s64<const IMM_INDEX: i32>(
    op1: svint64_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint64_t {
    // Valid lane indices for this 32-bit-element form are 0..=3.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmlalb.lane.nxv2i64"
        )]
        fn _svqdmlalb_lane_s64(
            op1: svint64_t,
            op2: svint32_t,
            op3: svint32_t,
            IMM_INDEX: i32,
        ) -> svint64_t;
    }
    // SAFETY: the required `sve,sve2` target features are enabled on this
    // function, and IMM_INDEX was validated above.
    unsafe { _svqdmlalb_lane_s64(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Saturating doubling multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmlalb))]
pub fn svqdmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmlalb.nxv8i16"
        )]
        fn _svqdmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
    }
    // SAFETY: the required `sve,sve2` target features are enabled on this
    // function, and the arguments match the declared intrinsic signature.
    unsafe { _svqdmlalb_s16(op1, op2, op3) }
}
11875#[doc = "Saturating doubling multiply-add long (bottom)"]
11876#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s16])"]
11877#[inline(always)]
11878#[target_feature(enable = "sve,sve2")]
11879#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11880#[cfg_attr(test, assert_instr(sqdmlalb))]
11881pub fn svqdmlalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
11882 svqdmlalb_s16(op1, op2, svdup_n_s8(op3))
11883}
#[doc = "Saturating doubling multiply-add long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmlalb))]
pub fn svqdmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmlalb.nxv4i32"
        )]
        fn _svqdmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
    }
    // SAFETY: the required `sve,sve2` target features are enabled on this
    // function, and the arguments match the declared intrinsic signature.
    unsafe { _svqdmlalb_s32(op1, op2, op3) }
}
11900#[doc = "Saturating doubling multiply-add long (bottom)"]
11901#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s32])"]
11902#[inline(always)]
11903#[target_feature(enable = "sve,sve2")]
11904#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11905#[cfg_attr(test, assert_instr(sqdmlalb))]
11906pub fn svqdmlalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
11907 svqdmlalb_s32(op1, op2, svdup_n_s16(op3))
11908}
11909#[doc = "Saturating doubling multiply-add long (bottom)"]
11910#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s64])"]
11911#[inline(always)]
11912#[target_feature(enable = "sve,sve2")]
11913#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11914#[cfg_attr(test, assert_instr(sqdmlalb))]
11915pub fn svqdmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
11916 unsafe extern "unadjusted" {
11917 #[cfg_attr(
11918 target_arch = "aarch64",
11919 link_name = "llvm.aarch64.sve.sqdmlalb.nxv2i64"
11920 )]
11921 fn _svqdmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
11922 }
11923 unsafe { _svqdmlalb_s64(op1, op2, op3) }
11924}
11925#[doc = "Saturating doubling multiply-add long (bottom)"]
11926#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s64])"]
11927#[inline(always)]
11928#[target_feature(enable = "sve,sve2")]
11929#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11930#[cfg_attr(test, assert_instr(sqdmlalb))]
11931pub fn svqdmlalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
11932 svqdmlalb_s64(op1, op2, svdup_n_s32(op3))
11933}
11934#[doc = "Saturating doubling multiply-add long (bottom × top)"]
11935#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s16])"]
11936#[inline(always)]
11937#[target_feature(enable = "sve,sve2")]
11938#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11939#[cfg_attr(test, assert_instr(sqdmlalbt))]
11940pub fn svqdmlalbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
11941 unsafe extern "unadjusted" {
11942 #[cfg_attr(
11943 target_arch = "aarch64",
11944 link_name = "llvm.aarch64.sve.sqdmlalbt.nxv8i16"
11945 )]
11946 fn _svqdmlalbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
11947 }
11948 unsafe { _svqdmlalbt_s16(op1, op2, op3) }
11949}
11950#[doc = "Saturating doubling multiply-add long (bottom × top)"]
11951#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s16])"]
11952#[inline(always)]
11953#[target_feature(enable = "sve,sve2")]
11954#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11955#[cfg_attr(test, assert_instr(sqdmlalbt))]
11956pub fn svqdmlalbt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
11957 svqdmlalbt_s16(op1, op2, svdup_n_s8(op3))
11958}
11959#[doc = "Saturating doubling multiply-add long (bottom × top)"]
11960#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s32])"]
11961#[inline(always)]
11962#[target_feature(enable = "sve,sve2")]
11963#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11964#[cfg_attr(test, assert_instr(sqdmlalbt))]
11965pub fn svqdmlalbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
11966 unsafe extern "unadjusted" {
11967 #[cfg_attr(
11968 target_arch = "aarch64",
11969 link_name = "llvm.aarch64.sve.sqdmlalbt.nxv4i32"
11970 )]
11971 fn _svqdmlalbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
11972 }
11973 unsafe { _svqdmlalbt_s32(op1, op2, op3) }
11974}
11975#[doc = "Saturating doubling multiply-add long (bottom × top)"]
11976#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s32])"]
11977#[inline(always)]
11978#[target_feature(enable = "sve,sve2")]
11979#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11980#[cfg_attr(test, assert_instr(sqdmlalbt))]
11981pub fn svqdmlalbt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
11982 svqdmlalbt_s32(op1, op2, svdup_n_s16(op3))
11983}
11984#[doc = "Saturating doubling multiply-add long (bottom × top)"]
11985#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s64])"]
11986#[inline(always)]
11987#[target_feature(enable = "sve,sve2")]
11988#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11989#[cfg_attr(test, assert_instr(sqdmlalbt))]
11990pub fn svqdmlalbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
11991 unsafe extern "unadjusted" {
11992 #[cfg_attr(
11993 target_arch = "aarch64",
11994 link_name = "llvm.aarch64.sve.sqdmlalbt.nxv2i64"
11995 )]
11996 fn _svqdmlalbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
11997 }
11998 unsafe { _svqdmlalbt_s64(op1, op2, op3) }
11999}
12000#[doc = "Saturating doubling multiply-add long (bottom × top)"]
12001#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s64])"]
12002#[inline(always)]
12003#[target_feature(enable = "sve,sve2")]
12004#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12005#[cfg_attr(test, assert_instr(sqdmlalbt))]
12006pub fn svqdmlalbt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
12007 svqdmlalbt_s64(op1, op2, svdup_n_s32(op3))
12008}
12009#[doc = "Saturating doubling multiply-add long (top)"]
12010#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt_lane[_s32])"]
12011#[inline(always)]
12012#[target_feature(enable = "sve,sve2")]
12013#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12014#[cfg_attr(test, assert_instr(sqdmlalt, IMM_INDEX = 0))]
12015pub fn svqdmlalt_lane_s32<const IMM_INDEX: i32>(
12016 op1: svint32_t,
12017 op2: svint16_t,
12018 op3: svint16_t,
12019) -> svint32_t {
12020 static_assert_range!(IMM_INDEX, 0..=7);
12021 unsafe extern "unadjusted" {
12022 #[cfg_attr(
12023 target_arch = "aarch64",
12024 link_name = "llvm.aarch64.sve.sqdmlalt.lane.nxv4i32"
12025 )]
12026 fn _svqdmlalt_lane_s32(
12027 op1: svint32_t,
12028 op2: svint16_t,
12029 op3: svint16_t,
12030 IMM_INDEX: i32,
12031 ) -> svint32_t;
12032 }
12033 unsafe { _svqdmlalt_lane_s32(op1, op2, op3, IMM_INDEX) }
12034}
12035#[doc = "Saturating doubling multiply-add long (top)"]
12036#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt_lane[_s64])"]
12037#[inline(always)]
12038#[target_feature(enable = "sve,sve2")]
12039#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12040#[cfg_attr(test, assert_instr(sqdmlalt, IMM_INDEX = 0))]
12041pub fn svqdmlalt_lane_s64<const IMM_INDEX: i32>(
12042 op1: svint64_t,
12043 op2: svint32_t,
12044 op3: svint32_t,
12045) -> svint64_t {
12046 static_assert_range!(IMM_INDEX, 0..=3);
12047 unsafe extern "unadjusted" {
12048 #[cfg_attr(
12049 target_arch = "aarch64",
12050 link_name = "llvm.aarch64.sve.sqdmlalt.lane.nxv2i64"
12051 )]
12052 fn _svqdmlalt_lane_s64(
12053 op1: svint64_t,
12054 op2: svint32_t,
12055 op3: svint32_t,
12056 IMM_INDEX: i32,
12057 ) -> svint64_t;
12058 }
12059 unsafe { _svqdmlalt_lane_s64(op1, op2, op3, IMM_INDEX) }
12060}
12061#[doc = "Saturating doubling multiply-add long (top)"]
12062#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s16])"]
12063#[inline(always)]
12064#[target_feature(enable = "sve,sve2")]
12065#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12066#[cfg_attr(test, assert_instr(sqdmlalt))]
12067pub fn svqdmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
12068 unsafe extern "unadjusted" {
12069 #[cfg_attr(
12070 target_arch = "aarch64",
12071 link_name = "llvm.aarch64.sve.sqdmlalt.nxv8i16"
12072 )]
12073 fn _svqdmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
12074 }
12075 unsafe { _svqdmlalt_s16(op1, op2, op3) }
12076}
12077#[doc = "Saturating doubling multiply-add long (top)"]
12078#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s16])"]
12079#[inline(always)]
12080#[target_feature(enable = "sve,sve2")]
12081#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12082#[cfg_attr(test, assert_instr(sqdmlalt))]
12083pub fn svqdmlalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
12084 svqdmlalt_s16(op1, op2, svdup_n_s8(op3))
12085}
12086#[doc = "Saturating doubling multiply-add long (top)"]
12087#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s32])"]
12088#[inline(always)]
12089#[target_feature(enable = "sve,sve2")]
12090#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12091#[cfg_attr(test, assert_instr(sqdmlalt))]
12092pub fn svqdmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
12093 unsafe extern "unadjusted" {
12094 #[cfg_attr(
12095 target_arch = "aarch64",
12096 link_name = "llvm.aarch64.sve.sqdmlalt.nxv4i32"
12097 )]
12098 fn _svqdmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
12099 }
12100 unsafe { _svqdmlalt_s32(op1, op2, op3) }
12101}
12102#[doc = "Saturating doubling multiply-add long (top)"]
12103#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s32])"]
12104#[inline(always)]
12105#[target_feature(enable = "sve,sve2")]
12106#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12107#[cfg_attr(test, assert_instr(sqdmlalt))]
12108pub fn svqdmlalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
12109 svqdmlalt_s32(op1, op2, svdup_n_s16(op3))
12110}
12111#[doc = "Saturating doubling multiply-add long (top)"]
12112#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s64])"]
12113#[inline(always)]
12114#[target_feature(enable = "sve,sve2")]
12115#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12116#[cfg_attr(test, assert_instr(sqdmlalt))]
12117pub fn svqdmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
12118 unsafe extern "unadjusted" {
12119 #[cfg_attr(
12120 target_arch = "aarch64",
12121 link_name = "llvm.aarch64.sve.sqdmlalt.nxv2i64"
12122 )]
12123 fn _svqdmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
12124 }
12125 unsafe { _svqdmlalt_s64(op1, op2, op3) }
12126}
12127#[doc = "Saturating doubling multiply-add long (top)"]
12128#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s64])"]
12129#[inline(always)]
12130#[target_feature(enable = "sve,sve2")]
12131#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12132#[cfg_attr(test, assert_instr(sqdmlalt))]
12133pub fn svqdmlalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
12134 svqdmlalt_s64(op1, op2, svdup_n_s32(op3))
12135}
12136#[doc = "Saturating doubling multiply-subtract long (bottom)"]
12137#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb_lane[_s32])"]
12138#[inline(always)]
12139#[target_feature(enable = "sve,sve2")]
12140#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12141#[cfg_attr(test, assert_instr(sqdmlslb, IMM_INDEX = 0))]
12142pub fn svqdmlslb_lane_s32<const IMM_INDEX: i32>(
12143 op1: svint32_t,
12144 op2: svint16_t,
12145 op3: svint16_t,
12146) -> svint32_t {
12147 static_assert_range!(IMM_INDEX, 0..=7);
12148 unsafe extern "unadjusted" {
12149 #[cfg_attr(
12150 target_arch = "aarch64",
12151 link_name = "llvm.aarch64.sve.sqdmlslb.lane.nxv4i32"
12152 )]
12153 fn _svqdmlslb_lane_s32(
12154 op1: svint32_t,
12155 op2: svint16_t,
12156 op3: svint16_t,
12157 IMM_INDEX: i32,
12158 ) -> svint32_t;
12159 }
12160 unsafe { _svqdmlslb_lane_s32(op1, op2, op3, IMM_INDEX) }
12161}
12162#[doc = "Saturating doubling multiply-subtract long (bottom)"]
12163#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb_lane[_s64])"]
12164#[inline(always)]
12165#[target_feature(enable = "sve,sve2")]
12166#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12167#[cfg_attr(test, assert_instr(sqdmlslb, IMM_INDEX = 0))]
12168pub fn svqdmlslb_lane_s64<const IMM_INDEX: i32>(
12169 op1: svint64_t,
12170 op2: svint32_t,
12171 op3: svint32_t,
12172) -> svint64_t {
12173 static_assert_range!(IMM_INDEX, 0..=3);
12174 unsafe extern "unadjusted" {
12175 #[cfg_attr(
12176 target_arch = "aarch64",
12177 link_name = "llvm.aarch64.sve.sqdmlslb.lane.nxv2i64"
12178 )]
12179 fn _svqdmlslb_lane_s64(
12180 op1: svint64_t,
12181 op2: svint32_t,
12182 op3: svint32_t,
12183 IMM_INDEX: i32,
12184 ) -> svint64_t;
12185 }
12186 unsafe { _svqdmlslb_lane_s64(op1, op2, op3, IMM_INDEX) }
12187}
12188#[doc = "Saturating doubling multiply-subtract long (bottom)"]
12189#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s16])"]
12190#[inline(always)]
12191#[target_feature(enable = "sve,sve2")]
12192#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12193#[cfg_attr(test, assert_instr(sqdmlslb))]
12194pub fn svqdmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
12195 unsafe extern "unadjusted" {
12196 #[cfg_attr(
12197 target_arch = "aarch64",
12198 link_name = "llvm.aarch64.sve.sqdmlslb.nxv8i16"
12199 )]
12200 fn _svqdmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
12201 }
12202 unsafe { _svqdmlslb_s16(op1, op2, op3) }
12203}
12204#[doc = "Saturating doubling multiply-subtract long (bottom)"]
12205#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s16])"]
12206#[inline(always)]
12207#[target_feature(enable = "sve,sve2")]
12208#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12209#[cfg_attr(test, assert_instr(sqdmlslb))]
12210pub fn svqdmlslb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
12211 svqdmlslb_s16(op1, op2, svdup_n_s8(op3))
12212}
12213#[doc = "Saturating doubling multiply-subtract long (bottom)"]
12214#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s32])"]
12215#[inline(always)]
12216#[target_feature(enable = "sve,sve2")]
12217#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12218#[cfg_attr(test, assert_instr(sqdmlslb))]
12219pub fn svqdmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
12220 unsafe extern "unadjusted" {
12221 #[cfg_attr(
12222 target_arch = "aarch64",
12223 link_name = "llvm.aarch64.sve.sqdmlslb.nxv4i32"
12224 )]
12225 fn _svqdmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
12226 }
12227 unsafe { _svqdmlslb_s32(op1, op2, op3) }
12228}
12229#[doc = "Saturating doubling multiply-subtract long (bottom)"]
12230#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s32])"]
12231#[inline(always)]
12232#[target_feature(enable = "sve,sve2")]
12233#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12234#[cfg_attr(test, assert_instr(sqdmlslb))]
12235pub fn svqdmlslb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
12236 svqdmlslb_s32(op1, op2, svdup_n_s16(op3))
12237}
12238#[doc = "Saturating doubling multiply-subtract long (bottom)"]
12239#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s64])"]
12240#[inline(always)]
12241#[target_feature(enable = "sve,sve2")]
12242#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12243#[cfg_attr(test, assert_instr(sqdmlslb))]
12244pub fn svqdmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
12245 unsafe extern "unadjusted" {
12246 #[cfg_attr(
12247 target_arch = "aarch64",
12248 link_name = "llvm.aarch64.sve.sqdmlslb.nxv2i64"
12249 )]
12250 fn _svqdmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
12251 }
12252 unsafe { _svqdmlslb_s64(op1, op2, op3) }
12253}
12254#[doc = "Saturating doubling multiply-subtract long (bottom)"]
12255#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s64])"]
12256#[inline(always)]
12257#[target_feature(enable = "sve,sve2")]
12258#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12259#[cfg_attr(test, assert_instr(sqdmlslb))]
12260pub fn svqdmlslb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
12261 svqdmlslb_s64(op1, op2, svdup_n_s32(op3))
12262}
12263#[doc = "Saturating doubling multiply-subtract long (bottom × top)"]
12264#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s16])"]
12265#[inline(always)]
12266#[target_feature(enable = "sve,sve2")]
12267#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12268#[cfg_attr(test, assert_instr(sqdmlslbt))]
12269pub fn svqdmlslbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
12270 unsafe extern "unadjusted" {
12271 #[cfg_attr(
12272 target_arch = "aarch64",
12273 link_name = "llvm.aarch64.sve.sqdmlslbt.nxv8i16"
12274 )]
12275 fn _svqdmlslbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
12276 }
12277 unsafe { _svqdmlslbt_s16(op1, op2, op3) }
12278}
12279#[doc = "Saturating doubling multiply-subtract long (bottom × top)"]
12280#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s16])"]
12281#[inline(always)]
12282#[target_feature(enable = "sve,sve2")]
12283#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12284#[cfg_attr(test, assert_instr(sqdmlslbt))]
12285pub fn svqdmlslbt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
12286 svqdmlslbt_s16(op1, op2, svdup_n_s8(op3))
12287}
12288#[doc = "Saturating doubling multiply-subtract long (bottom × top)"]
12289#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s32])"]
12290#[inline(always)]
12291#[target_feature(enable = "sve,sve2")]
12292#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12293#[cfg_attr(test, assert_instr(sqdmlslbt))]
12294pub fn svqdmlslbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
12295 unsafe extern "unadjusted" {
12296 #[cfg_attr(
12297 target_arch = "aarch64",
12298 link_name = "llvm.aarch64.sve.sqdmlslbt.nxv4i32"
12299 )]
12300 fn _svqdmlslbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
12301 }
12302 unsafe { _svqdmlslbt_s32(op1, op2, op3) }
12303}
12304#[doc = "Saturating doubling multiply-subtract long (bottom × top)"]
12305#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s32])"]
12306#[inline(always)]
12307#[target_feature(enable = "sve,sve2")]
12308#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12309#[cfg_attr(test, assert_instr(sqdmlslbt))]
12310pub fn svqdmlslbt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
12311 svqdmlslbt_s32(op1, op2, svdup_n_s16(op3))
12312}
12313#[doc = "Saturating doubling multiply-subtract long (bottom × top)"]
12314#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s64])"]
12315#[inline(always)]
12316#[target_feature(enable = "sve,sve2")]
12317#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12318#[cfg_attr(test, assert_instr(sqdmlslbt))]
12319pub fn svqdmlslbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
12320 unsafe extern "unadjusted" {
12321 #[cfg_attr(
12322 target_arch = "aarch64",
12323 link_name = "llvm.aarch64.sve.sqdmlslbt.nxv2i64"
12324 )]
12325 fn _svqdmlslbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
12326 }
12327 unsafe { _svqdmlslbt_s64(op1, op2, op3) }
12328}
12329#[doc = "Saturating doubling multiply-subtract long (bottom × top)"]
12330#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s64])"]
12331#[inline(always)]
12332#[target_feature(enable = "sve,sve2")]
12333#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12334#[cfg_attr(test, assert_instr(sqdmlslbt))]
12335pub fn svqdmlslbt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
12336 svqdmlslbt_s64(op1, op2, svdup_n_s32(op3))
12337}
12338#[doc = "Saturating doubling multiply-subtract long (top)"]
12339#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt_lane[_s32])"]
12340#[inline(always)]
12341#[target_feature(enable = "sve,sve2")]
12342#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12343#[cfg_attr(test, assert_instr(sqdmlslt, IMM_INDEX = 0))]
12344pub fn svqdmlslt_lane_s32<const IMM_INDEX: i32>(
12345 op1: svint32_t,
12346 op2: svint16_t,
12347 op3: svint16_t,
12348) -> svint32_t {
12349 static_assert_range!(IMM_INDEX, 0..=7);
12350 unsafe extern "unadjusted" {
12351 #[cfg_attr(
12352 target_arch = "aarch64",
12353 link_name = "llvm.aarch64.sve.sqdmlslt.lane.nxv4i32"
12354 )]
12355 fn _svqdmlslt_lane_s32(
12356 op1: svint32_t,
12357 op2: svint16_t,
12358 op3: svint16_t,
12359 IMM_INDEX: i32,
12360 ) -> svint32_t;
12361 }
12362 unsafe { _svqdmlslt_lane_s32(op1, op2, op3, IMM_INDEX) }
12363}
12364#[doc = "Saturating doubling multiply-subtract long (top)"]
12365#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt_lane[_s64])"]
12366#[inline(always)]
12367#[target_feature(enable = "sve,sve2")]
12368#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12369#[cfg_attr(test, assert_instr(sqdmlslt, IMM_INDEX = 0))]
12370pub fn svqdmlslt_lane_s64<const IMM_INDEX: i32>(
12371 op1: svint64_t,
12372 op2: svint32_t,
12373 op3: svint32_t,
12374) -> svint64_t {
12375 static_assert_range!(IMM_INDEX, 0..=3);
12376 unsafe extern "unadjusted" {
12377 #[cfg_attr(
12378 target_arch = "aarch64",
12379 link_name = "llvm.aarch64.sve.sqdmlslt.lane.nxv2i64"
12380 )]
12381 fn _svqdmlslt_lane_s64(
12382 op1: svint64_t,
12383 op2: svint32_t,
12384 op3: svint32_t,
12385 IMM_INDEX: i32,
12386 ) -> svint64_t;
12387 }
12388 unsafe { _svqdmlslt_lane_s64(op1, op2, op3, IMM_INDEX) }
12389}
12390#[doc = "Saturating doubling multiply-subtract long (top)"]
12391#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s16])"]
12392#[inline(always)]
12393#[target_feature(enable = "sve,sve2")]
12394#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12395#[cfg_attr(test, assert_instr(sqdmlslt))]
12396pub fn svqdmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
12397 unsafe extern "unadjusted" {
12398 #[cfg_attr(
12399 target_arch = "aarch64",
12400 link_name = "llvm.aarch64.sve.sqdmlslt.nxv8i16"
12401 )]
12402 fn _svqdmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
12403 }
12404 unsafe { _svqdmlslt_s16(op1, op2, op3) }
12405}
12406#[doc = "Saturating doubling multiply-subtract long (top)"]
12407#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s16])"]
12408#[inline(always)]
12409#[target_feature(enable = "sve,sve2")]
12410#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12411#[cfg_attr(test, assert_instr(sqdmlslt))]
12412pub fn svqdmlslt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
12413 svqdmlslt_s16(op1, op2, svdup_n_s8(op3))
12414}
12415#[doc = "Saturating doubling multiply-subtract long (top)"]
12416#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s32])"]
12417#[inline(always)]
12418#[target_feature(enable = "sve,sve2")]
12419#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12420#[cfg_attr(test, assert_instr(sqdmlslt))]
12421pub fn svqdmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
12422 unsafe extern "unadjusted" {
12423 #[cfg_attr(
12424 target_arch = "aarch64",
12425 link_name = "llvm.aarch64.sve.sqdmlslt.nxv4i32"
12426 )]
12427 fn _svqdmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
12428 }
12429 unsafe { _svqdmlslt_s32(op1, op2, op3) }
12430}
12431#[doc = "Saturating doubling multiply-subtract long (top)"]
12432#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s32])"]
12433#[inline(always)]
12434#[target_feature(enable = "sve,sve2")]
12435#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12436#[cfg_attr(test, assert_instr(sqdmlslt))]
12437pub fn svqdmlslt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
12438 svqdmlslt_s32(op1, op2, svdup_n_s16(op3))
12439}
12440#[doc = "Saturating doubling multiply-subtract long (top)"]
12441#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s64])"]
12442#[inline(always)]
12443#[target_feature(enable = "sve,sve2")]
12444#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12445#[cfg_attr(test, assert_instr(sqdmlslt))]
12446pub fn svqdmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
12447 unsafe extern "unadjusted" {
12448 #[cfg_attr(
12449 target_arch = "aarch64",
12450 link_name = "llvm.aarch64.sve.sqdmlslt.nxv2i64"
12451 )]
12452 fn _svqdmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
12453 }
12454 unsafe { _svqdmlslt_s64(op1, op2, op3) }
12455}
12456#[doc = "Saturating doubling multiply-subtract long (top)"]
12457#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s64])"]
12458#[inline(always)]
12459#[target_feature(enable = "sve,sve2")]
12460#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12461#[cfg_attr(test, assert_instr(sqdmlslt))]
12462pub fn svqdmlslt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
12463 svqdmlslt_s64(op1, op2, svdup_n_s32(op3))
12464}
12465#[doc = "Saturating doubling multiply high"]
12466#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s16])"]
12467#[inline(always)]
12468#[target_feature(enable = "sve,sve2")]
12469#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12470#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))]
12471pub fn svqdmulh_lane_s16<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
12472 static_assert_range!(IMM_INDEX, 0..=7);
12473 unsafe extern "unadjusted" {
12474 #[cfg_attr(
12475 target_arch = "aarch64",
12476 link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv8i16"
12477 )]
12478 fn _svqdmulh_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t;
12479 }
12480 unsafe { _svqdmulh_lane_s16(op1, op2, IMM_INDEX) }
12481}
12482#[doc = "Saturating doubling multiply high"]
12483#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s32])"]
12484#[inline(always)]
12485#[target_feature(enable = "sve,sve2")]
12486#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12487#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))]
12488pub fn svqdmulh_lane_s32<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
12489 static_assert_range!(IMM_INDEX, 0..=3);
12490 unsafe extern "unadjusted" {
12491 #[cfg_attr(
12492 target_arch = "aarch64",
12493 link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv4i32"
12494 )]
12495 fn _svqdmulh_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t;
12496 }
12497 unsafe { _svqdmulh_lane_s32(op1, op2, IMM_INDEX) }
12498}
12499#[doc = "Saturating doubling multiply high"]
12500#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s64])"]
12501#[inline(always)]
12502#[target_feature(enable = "sve,sve2")]
12503#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12504#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))]
12505pub fn svqdmulh_lane_s64<const IMM_INDEX: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
12506 static_assert_range!(IMM_INDEX, 0..=1);
12507 unsafe extern "unadjusted" {
12508 #[cfg_attr(
12509 target_arch = "aarch64",
12510 link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv2i64"
12511 )]
12512 fn _svqdmulh_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t;
12513 }
12514 unsafe { _svqdmulh_lane_s64(op1, op2, IMM_INDEX) }
12515}
12516#[doc = "Saturating doubling multiply high"]
12517#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s8])"]
12518#[inline(always)]
12519#[target_feature(enable = "sve,sve2")]
12520#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12521#[cfg_attr(test, assert_instr(sqdmulh))]
12522pub fn svqdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
12523 unsafe extern "unadjusted" {
12524 #[cfg_attr(
12525 target_arch = "aarch64",
12526 link_name = "llvm.aarch64.sve.sqdmulh.nxv16i8"
12527 )]
12528 fn _svqdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
12529 }
12530 unsafe { _svqdmulh_s8(op1, op2) }
12531}
12532#[doc = "Saturating doubling multiply high"]
12533#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s8])"]
12534#[inline(always)]
12535#[target_feature(enable = "sve,sve2")]
12536#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12537#[cfg_attr(test, assert_instr(sqdmulh))]
12538pub fn svqdmulh_n_s8(op1: svint8_t, op2: i8) -> svint8_t {
12539 svqdmulh_s8(op1, svdup_n_s8(op2))
12540}
12541#[doc = "Saturating doubling multiply high"]
12542#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s16])"]
12543#[inline(always)]
12544#[target_feature(enable = "sve,sve2")]
12545#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12546#[cfg_attr(test, assert_instr(sqdmulh))]
12547pub fn svqdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
12548 unsafe extern "unadjusted" {
12549 #[cfg_attr(
12550 target_arch = "aarch64",
12551 link_name = "llvm.aarch64.sve.sqdmulh.nxv8i16"
12552 )]
12553 fn _svqdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
12554 }
12555 unsafe { _svqdmulh_s16(op1, op2) }
12556}
12557#[doc = "Saturating doubling multiply high"]
12558#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s16])"]
12559#[inline(always)]
12560#[target_feature(enable = "sve,sve2")]
12561#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12562#[cfg_attr(test, assert_instr(sqdmulh))]
12563pub fn svqdmulh_n_s16(op1: svint16_t, op2: i16) -> svint16_t {
12564 svqdmulh_s16(op1, svdup_n_s16(op2))
12565}
12566#[doc = "Saturating doubling multiply high"]
12567#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s32])"]
12568#[inline(always)]
12569#[target_feature(enable = "sve,sve2")]
12570#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12571#[cfg_attr(test, assert_instr(sqdmulh))]
12572pub fn svqdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
12573 unsafe extern "unadjusted" {
12574 #[cfg_attr(
12575 target_arch = "aarch64",
12576 link_name = "llvm.aarch64.sve.sqdmulh.nxv4i32"
12577 )]
12578 fn _svqdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
12579 }
12580 unsafe { _svqdmulh_s32(op1, op2) }
12581}
12582#[doc = "Saturating doubling multiply high"]
12583#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s32])"]
12584#[inline(always)]
12585#[target_feature(enable = "sve,sve2")]
12586#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12587#[cfg_attr(test, assert_instr(sqdmulh))]
12588pub fn svqdmulh_n_s32(op1: svint32_t, op2: i32) -> svint32_t {
12589 svqdmulh_s32(op1, svdup_n_s32(op2))
12590}
#[doc = "Saturating doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmulh))]
pub fn svqdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Raw binding to the backing LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmulh.nxv2i64"
        )]
        fn _svqdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are
    // enabled for this function.
    unsafe { _svqdmulh_s64(op1, op2) }
}
12607#[doc = "Saturating doubling multiply high"]
12608#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s64])"]
12609#[inline(always)]
12610#[target_feature(enable = "sve,sve2")]
12611#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12612#[cfg_attr(test, assert_instr(sqdmulh))]
12613pub fn svqdmulh_n_s64(op1: svint64_t, op2: i64) -> svint64_t {
12614 svqdmulh_s64(op1, svdup_n_s64(op2))
12615}
#[doc = "Saturating doubling multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullb, IMM_INDEX = 0))]
pub fn svqdmullb_lane_s32<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // The lane index is validated at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    // Raw binding to the backing LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullb.lane.nxv4i32"
        )]
        fn _svqdmullb_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are enabled
    // for this function, and IMM_INDEX was range-checked above.
    unsafe { _svqdmullb_lane_s32(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating doubling multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullb, IMM_INDEX = 0))]
pub fn svqdmullb_lane_s64<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint64_t {
    // The lane index is validated at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    // Raw binding to the backing LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullb.lane.nxv2i64"
        )]
        fn _svqdmullb_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are enabled
    // for this function, and IMM_INDEX was range-checked above.
    unsafe { _svqdmullb_lane_s64(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating doubling multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullb))]
pub fn svqdmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
    // Raw binding to the backing LLVM SVE intrinsic (widens i8 lanes to i16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullb.nxv8i16"
        )]
        fn _svqdmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are
    // enabled for this function.
    unsafe { _svqdmullb_s16(op1, op2) }
}
12666#[doc = "Saturating doubling multiply long (bottom)"]
12667#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s16])"]
12668#[inline(always)]
12669#[target_feature(enable = "sve,sve2")]
12670#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12671#[cfg_attr(test, assert_instr(sqdmullb))]
12672pub fn svqdmullb_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
12673 svqdmullb_s16(op1, svdup_n_s8(op2))
12674}
#[doc = "Saturating doubling multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullb))]
pub fn svqdmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // Raw binding to the backing LLVM SVE intrinsic (widens i16 lanes to i32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullb.nxv4i32"
        )]
        fn _svqdmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are
    // enabled for this function.
    unsafe { _svqdmullb_s32(op1, op2) }
}
12691#[doc = "Saturating doubling multiply long (bottom)"]
12692#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s32])"]
12693#[inline(always)]
12694#[target_feature(enable = "sve,sve2")]
12695#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12696#[cfg_attr(test, assert_instr(sqdmullb))]
12697pub fn svqdmullb_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
12698 svqdmullb_s32(op1, svdup_n_s16(op2))
12699}
#[doc = "Saturating doubling multiply long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullb))]
pub fn svqdmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
    // Raw binding to the backing LLVM SVE intrinsic (widens i32 lanes to i64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullb.nxv2i64"
        )]
        fn _svqdmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are
    // enabled for this function.
    unsafe { _svqdmullb_s64(op1, op2) }
}
12716#[doc = "Saturating doubling multiply long (bottom)"]
12717#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s64])"]
12718#[inline(always)]
12719#[target_feature(enable = "sve,sve2")]
12720#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12721#[cfg_attr(test, assert_instr(sqdmullb))]
12722pub fn svqdmullb_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
12723 svqdmullb_s64(op1, svdup_n_s32(op2))
12724}
#[doc = "Saturating doubling multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullt, IMM_INDEX = 0))]
pub fn svqdmullt_lane_s32<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // The lane index is validated at compile time.
    static_assert_range!(IMM_INDEX, 0..=7);
    // Raw binding to the backing LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullt.lane.nxv4i32"
        )]
        fn _svqdmullt_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are enabled
    // for this function, and IMM_INDEX was range-checked above.
    unsafe { _svqdmullt_lane_s32(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating doubling multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullt, IMM_INDEX = 0))]
pub fn svqdmullt_lane_s64<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint64_t {
    // The lane index is validated at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    // Raw binding to the backing LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullt.lane.nxv2i64"
        )]
        fn _svqdmullt_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are enabled
    // for this function, and IMM_INDEX was range-checked above.
    unsafe { _svqdmullt_lane_s64(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating doubling multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullt))]
pub fn svqdmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
    // Raw binding to the backing LLVM SVE intrinsic (widens i8 lanes to i16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullt.nxv8i16"
        )]
        fn _svqdmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are
    // enabled for this function.
    unsafe { _svqdmullt_s16(op1, op2) }
}
12775#[doc = "Saturating doubling multiply long (top)"]
12776#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s16])"]
12777#[inline(always)]
12778#[target_feature(enable = "sve,sve2")]
12779#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12780#[cfg_attr(test, assert_instr(sqdmullt))]
12781pub fn svqdmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
12782 svqdmullt_s16(op1, svdup_n_s8(op2))
12783}
#[doc = "Saturating doubling multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullt))]
pub fn svqdmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // Raw binding to the backing LLVM SVE intrinsic (widens i16 lanes to i32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullt.nxv4i32"
        )]
        fn _svqdmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are
    // enabled for this function.
    unsafe { _svqdmullt_s32(op1, op2) }
}
12800#[doc = "Saturating doubling multiply long (top)"]
12801#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s32])"]
12802#[inline(always)]
12803#[target_feature(enable = "sve,sve2")]
12804#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12805#[cfg_attr(test, assert_instr(sqdmullt))]
12806pub fn svqdmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
12807 svqdmullt_s32(op1, svdup_n_s16(op2))
12808}
#[doc = "Saturating doubling multiply long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdmullt))]
pub fn svqdmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
    // Raw binding to the backing LLVM SVE intrinsic (widens i32 lanes to i64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdmullt.nxv2i64"
        )]
        fn _svqdmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are
    // enabled for this function.
    unsafe { _svqdmullt_s64(op1, op2) }
}
12825#[doc = "Saturating doubling multiply long (top)"]
12826#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s64])"]
12827#[inline(always)]
12828#[target_feature(enable = "sve,sve2")]
12829#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12830#[cfg_attr(test, assert_instr(sqdmullt))]
12831pub fn svqdmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
12832 svqdmullt_s64(op1, svdup_n_s32(op2))
12833}
#[doc = "Saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn svqneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t {
    // Raw binding to the backing LLVM SVE intrinsic. For 8-bit elements the
    // full-width predicate `svbool_t` is passed through without conversion.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv16i8")]
        fn _svqneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are
    // enabled for this function.
    unsafe { _svqneg_s8_m(inactive, pg, op) }
}
12847#[doc = "Saturating negate"]
12848#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_x)"]
12849#[inline(always)]
12850#[target_feature(enable = "sve,sve2")]
12851#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12852#[cfg_attr(test, assert_instr(sqneg))]
12853pub fn svqneg_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t {
12854 svqneg_s8_m(op, pg, op)
12855}
12856#[doc = "Saturating negate"]
12857#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_z)"]
12858#[inline(always)]
12859#[target_feature(enable = "sve,sve2")]
12860#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12861#[cfg_attr(test, assert_instr(sqneg))]
12862pub fn svqneg_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t {
12863 svqneg_s8_m(svdup_n_s8(0), pg, op)
12864}
#[doc = "Saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn svqneg_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
    // Raw binding to the backing LLVM SVE intrinsic; it expects the
    // element-width predicate type `svbool8_t`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv8i16")]
        fn _svqneg_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are enabled
    // for this function; `sve_into` converts the predicate to the expected width.
    unsafe { _svqneg_s16_m(inactive, pg.sve_into(), op) }
}
12878#[doc = "Saturating negate"]
12879#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_x)"]
12880#[inline(always)]
12881#[target_feature(enable = "sve,sve2")]
12882#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12883#[cfg_attr(test, assert_instr(sqneg))]
12884pub fn svqneg_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
12885 svqneg_s16_m(op, pg, op)
12886}
12887#[doc = "Saturating negate"]
12888#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_z)"]
12889#[inline(always)]
12890#[target_feature(enable = "sve,sve2")]
12891#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12892#[cfg_attr(test, assert_instr(sqneg))]
12893pub fn svqneg_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
12894 svqneg_s16_m(svdup_n_s16(0), pg, op)
12895}
#[doc = "Saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn svqneg_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
    // Raw binding to the backing LLVM SVE intrinsic; it expects the
    // element-width predicate type `svbool4_t`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv4i32")]
        fn _svqneg_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are enabled
    // for this function; `sve_into` converts the predicate to the expected width.
    unsafe { _svqneg_s32_m(inactive, pg.sve_into(), op) }
}
12909#[doc = "Saturating negate"]
12910#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_x)"]
12911#[inline(always)]
12912#[target_feature(enable = "sve,sve2")]
12913#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12914#[cfg_attr(test, assert_instr(sqneg))]
12915pub fn svqneg_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
12916 svqneg_s32_m(op, pg, op)
12917}
12918#[doc = "Saturating negate"]
12919#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_z)"]
12920#[inline(always)]
12921#[target_feature(enable = "sve,sve2")]
12922#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12923#[cfg_attr(test, assert_instr(sqneg))]
12924pub fn svqneg_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
12925 svqneg_s32_m(svdup_n_s32(0), pg, op)
12926}
#[doc = "Saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn svqneg_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    // Raw binding to the backing LLVM SVE intrinsic; it expects the
    // element-width predicate type `svbool2_t`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv2i64")]
        fn _svqneg_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are enabled
    // for this function; `sve_into` converts the predicate to the expected width.
    unsafe { _svqneg_s64_m(inactive, pg.sve_into(), op) }
}
12940#[doc = "Saturating negate"]
12941#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_x)"]
12942#[inline(always)]
12943#[target_feature(enable = "sve,sve2")]
12944#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12945#[cfg_attr(test, assert_instr(sqneg))]
12946pub fn svqneg_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
12947 svqneg_s64_m(op, pg, op)
12948}
12949#[doc = "Saturating negate"]
12950#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_z)"]
12951#[inline(always)]
12952#[target_feature(enable = "sve,sve2")]
12953#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
12954#[cfg_attr(test, assert_instr(sqneg))]
12955pub fn svqneg_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
12956 svqneg_s64_m(svdup_n_s64(0), pg, op)
12957}
#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah_lane[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svqrdcmlah_lane_s16<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svint16_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint16_t {
    // Lane index and rotation (degrees) are validated at compile time.
    static_assert_range!(IMM_INDEX, 0..=3);
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    // Raw binding to the backing LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdcmlah.lane.x.nxv8i16"
        )]
        fn _svqrdcmlah_lane_s16(
            op1: svint16_t,
            op2: svint16_t,
            op3: svint16_t,
            imm_index: i32,
            imm_rotation: i32,
        ) -> svint16_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are enabled
    // for this function, and both immediates were validated above.
    unsafe { _svqrdcmlah_lane_s16(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
}
#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svqrdcmlah_lane_s32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svint32_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint32_t {
    // Lane index and rotation (degrees) are validated at compile time.
    static_assert_range!(IMM_INDEX, 0..=1);
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    // Raw binding to the backing LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdcmlah.lane.x.nxv4i32"
        )]
        fn _svqrdcmlah_lane_s32(
            op1: svint32_t,
            op2: svint32_t,
            op3: svint32_t,
            imm_index: i32,
            imm_rotation: i32,
        ) -> svint32_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are enabled
    // for this function, and both immediates were validated above.
    unsafe { _svqrdcmlah_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
}
#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))]
pub fn svqrdcmlah_s8<const IMM_ROTATION: i32>(
    op1: svint8_t,
    op2: svint8_t,
    op3: svint8_t,
) -> svint8_t {
    // The rotation (degrees) is validated at compile time.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    // Raw binding to the backing LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv16i8"
        )]
        fn _svqrdcmlah_s8(
            op1: svint8_t,
            op2: svint8_t,
            op3: svint8_t,
            imm_rotation: i32,
        ) -> svint8_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are enabled
    // for this function, and IMM_ROTATION was validated above.
    unsafe { _svqrdcmlah_s8(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))]
pub fn svqrdcmlah_s16<const IMM_ROTATION: i32>(
    op1: svint16_t,
    op2: svint16_t,
    op3: svint16_t,
) -> svint16_t {
    // The rotation (degrees) is validated at compile time.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    // Raw binding to the backing LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv8i16"
        )]
        fn _svqrdcmlah_s16(
            op1: svint16_t,
            op2: svint16_t,
            op3: svint16_t,
            imm_rotation: i32,
        ) -> svint16_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are enabled
    // for this function, and IMM_ROTATION was validated above.
    unsafe { _svqrdcmlah_s16(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))]
pub fn svqrdcmlah_s32<const IMM_ROTATION: i32>(
    op1: svint32_t,
    op2: svint32_t,
    op3: svint32_t,
) -> svint32_t {
    // The rotation (degrees) is validated at compile time.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    // Raw binding to the backing LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv4i32"
        )]
        fn _svqrdcmlah_s32(
            op1: svint32_t,
            op2: svint32_t,
            op3: svint32_t,
            imm_rotation: i32,
        ) -> svint32_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are enabled
    // for this function, and IMM_ROTATION was validated above.
    unsafe { _svqrdcmlah_s32(op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))]
pub fn svqrdcmlah_s64<const IMM_ROTATION: i32>(
    op1: svint64_t,
    op2: svint64_t,
    op3: svint64_t,
) -> svint64_t {
    // The rotation (degrees) is validated at compile time.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    // Raw binding to the backing LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv2i64"
        )]
        fn _svqrdcmlah_s64(
            op1: svint64_t,
            op2: svint64_t,
            op3: svint64_t,
            imm_rotation: i32,
        ) -> svint64_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are enabled
    // for this function, and IMM_ROTATION was validated above.
    unsafe { _svqrdcmlah_s64(op1, op2, op3, IMM_ROTATION) }
}
13130#[doc = "Saturating rounding doubling multiply-add high"]
13131#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s16])"]
13132#[inline(always)]
13133#[target_feature(enable = "sve,sve2")]
13134#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13135#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))]
13136pub fn svqrdmlah_lane_s16<const IMM_INDEX: i32>(
13137 op1: svint16_t,
13138 op2: svint16_t,
13139 op3: svint16_t,
13140) -> svint16_t {
13141 static_assert_range!(IMM_INDEX, 0..=7);
13142 unsafe extern "unadjusted" {
13143 #[cfg_attr(
13144 target_arch = "aarch64",
13145 link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv8i16"
13146 )]
13147 fn _svqrdmlah_lane_s16(
13148 op1: svint16_t,
13149 op2: svint16_t,
13150 op3: svint16_t,
13151 IMM_INDEX: i32,
13152 ) -> svint16_t;
13153 }
13154 unsafe { _svqrdmlah_lane_s16(op1, op2, op3, IMM_INDEX) }
13155}
13156#[doc = "Saturating rounding doubling multiply-add high"]
13157#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s32])"]
13158#[inline(always)]
13159#[target_feature(enable = "sve,sve2")]
13160#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13161#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))]
13162pub fn svqrdmlah_lane_s32<const IMM_INDEX: i32>(
13163 op1: svint32_t,
13164 op2: svint32_t,
13165 op3: svint32_t,
13166) -> svint32_t {
13167 static_assert_range!(IMM_INDEX, 0..=3);
13168 unsafe extern "unadjusted" {
13169 #[cfg_attr(
13170 target_arch = "aarch64",
13171 link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv4i32"
13172 )]
13173 fn _svqrdmlah_lane_s32(
13174 op1: svint32_t,
13175 op2: svint32_t,
13176 op3: svint32_t,
13177 IMM_INDEX: i32,
13178 ) -> svint32_t;
13179 }
13180 unsafe { _svqrdmlah_lane_s32(op1, op2, op3, IMM_INDEX) }
13181}
13182#[doc = "Saturating rounding doubling multiply-add high"]
13183#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s64])"]
13184#[inline(always)]
13185#[target_feature(enable = "sve,sve2")]
13186#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13187#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))]
13188pub fn svqrdmlah_lane_s64<const IMM_INDEX: i32>(
13189 op1: svint64_t,
13190 op2: svint64_t,
13191 op3: svint64_t,
13192) -> svint64_t {
13193 static_assert_range!(IMM_INDEX, 0..=1);
13194 unsafe extern "unadjusted" {
13195 #[cfg_attr(
13196 target_arch = "aarch64",
13197 link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv2i64"
13198 )]
13199 fn _svqrdmlah_lane_s64(
13200 op1: svint64_t,
13201 op2: svint64_t,
13202 op3: svint64_t,
13203 IMM_INDEX: i32,
13204 ) -> svint64_t;
13205 }
13206 unsafe { _svqrdmlah_lane_s64(op1, op2, op3, IMM_INDEX) }
13207}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    // Raw binding to the backing LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlah.nxv16i8"
        )]
        fn _svqrdmlah_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are
    // enabled for this function.
    unsafe { _svqrdmlah_s8(op1, op2, op3) }
}
13224#[doc = "Saturating rounding doubling multiply-add high"]
13225#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s8])"]
13226#[inline(always)]
13227#[target_feature(enable = "sve,sve2")]
13228#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13229#[cfg_attr(test, assert_instr(sqrdmlah))]
13230pub fn svqrdmlah_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
13231 svqrdmlah_s8(op1, op2, svdup_n_s8(op3))
13232}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    // Raw binding to the backing LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlah.nxv8i16"
        )]
        fn _svqrdmlah_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are
    // enabled for this function.
    unsafe { _svqrdmlah_s16(op1, op2, op3) }
}
13249#[doc = "Saturating rounding doubling multiply-add high"]
13250#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s16])"]
13251#[inline(always)]
13252#[target_feature(enable = "sve,sve2")]
13253#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13254#[cfg_attr(test, assert_instr(sqrdmlah))]
13255pub fn svqrdmlah_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
13256 svqrdmlah_s16(op1, op2, svdup_n_s16(op3))
13257}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    // Raw binding to the backing LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlah.nxv4i32"
        )]
        fn _svqrdmlah_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are
    // enabled for this function.
    unsafe { _svqrdmlah_s32(op1, op2, op3) }
}
13274#[doc = "Saturating rounding doubling multiply-add high"]
13275#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s32])"]
13276#[inline(always)]
13277#[target_feature(enable = "sve,sve2")]
13278#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13279#[cfg_attr(test, assert_instr(sqrdmlah))]
13280pub fn svqrdmlah_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
13281 svqrdmlah_s32(op1, op2, svdup_n_s32(op3))
13282}
#[doc = "Saturating rounding doubling multiply-add high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
pub fn svqrdmlah_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    // Raw binding to the backing LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlah.nxv2i64"
        )]
        fn _svqrdmlah_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: the "sve,sve2" target features this intrinsic needs are
    // enabled for this function.
    unsafe { _svqrdmlah_s64(op1, op2, op3) }
}
13299#[doc = "Saturating rounding doubling multiply-add high"]
13300#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s64])"]
13301#[inline(always)]
13302#[target_feature(enable = "sve,sve2")]
13303#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13304#[cfg_attr(test, assert_instr(sqrdmlah))]
13305pub fn svqrdmlah_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
13306 svqrdmlah_s64(op1, op2, svdup_n_s64(op3))
13307}
13308#[doc = "Saturating rounding doubling multiply-subtract high"]
13309#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s16])"]
13310#[inline(always)]
13311#[target_feature(enable = "sve,sve2")]
13312#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13313#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))]
13314pub fn svqrdmlsh_lane_s16<const IMM_INDEX: i32>(
13315 op1: svint16_t,
13316 op2: svint16_t,
13317 op3: svint16_t,
13318) -> svint16_t {
13319 static_assert_range!(IMM_INDEX, 0..=7);
13320 unsafe extern "unadjusted" {
13321 #[cfg_attr(
13322 target_arch = "aarch64",
13323 link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv8i16"
13324 )]
13325 fn _svqrdmlsh_lane_s16(
13326 op1: svint16_t,
13327 op2: svint16_t,
13328 op3: svint16_t,
13329 IMM_INDEX: i32,
13330 ) -> svint16_t;
13331 }
13332 unsafe { _svqrdmlsh_lane_s16(op1, op2, op3, IMM_INDEX) }
13333}
13334#[doc = "Saturating rounding doubling multiply-subtract high"]
13335#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s32])"]
13336#[inline(always)]
13337#[target_feature(enable = "sve,sve2")]
13338#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13339#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))]
13340pub fn svqrdmlsh_lane_s32<const IMM_INDEX: i32>(
13341 op1: svint32_t,
13342 op2: svint32_t,
13343 op3: svint32_t,
13344) -> svint32_t {
13345 static_assert_range!(IMM_INDEX, 0..=3);
13346 unsafe extern "unadjusted" {
13347 #[cfg_attr(
13348 target_arch = "aarch64",
13349 link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv4i32"
13350 )]
13351 fn _svqrdmlsh_lane_s32(
13352 op1: svint32_t,
13353 op2: svint32_t,
13354 op3: svint32_t,
13355 IMM_INDEX: i32,
13356 ) -> svint32_t;
13357 }
13358 unsafe { _svqrdmlsh_lane_s32(op1, op2, op3, IMM_INDEX) }
13359}
13360#[doc = "Saturating rounding doubling multiply-subtract high"]
13361#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s64])"]
13362#[inline(always)]
13363#[target_feature(enable = "sve,sve2")]
13364#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13365#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))]
13366pub fn svqrdmlsh_lane_s64<const IMM_INDEX: i32>(
13367 op1: svint64_t,
13368 op2: svint64_t,
13369 op3: svint64_t,
13370) -> svint64_t {
13371 static_assert_range!(IMM_INDEX, 0..=1);
13372 unsafe extern "unadjusted" {
13373 #[cfg_attr(
13374 target_arch = "aarch64",
13375 link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv2i64"
13376 )]
13377 fn _svqrdmlsh_lane_s64(
13378 op1: svint64_t,
13379 op2: svint64_t,
13380 op3: svint64_t,
13381 IMM_INDEX: i32,
13382 ) -> svint64_t;
13383 }
13384 unsafe { _svqrdmlsh_lane_s64(op1, op2, op3, IMM_INDEX) }
13385}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    // Raw declaration of the LLVM SVE2 intrinsic, resolved by `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlsh.nxv16i8"
        )]
        fn _svqrdmlsh_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    // SAFETY: sve2 is guaranteed available by the `#[target_feature]` gate above.
    unsafe { _svqrdmlsh_s8(op1, op2, op3) }
}
13402#[doc = "Saturating rounding doubling multiply-subtract high"]
13403#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s8])"]
13404#[inline(always)]
13405#[target_feature(enable = "sve,sve2")]
13406#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13407#[cfg_attr(test, assert_instr(sqrdmlsh))]
13408pub fn svqrdmlsh_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
13409 svqrdmlsh_s8(op1, op2, svdup_n_s8(op3))
13410}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    // Raw declaration of the LLVM SVE2 intrinsic, resolved by `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlsh.nxv8i16"
        )]
        fn _svqrdmlsh_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
    }
    // SAFETY: sve2 is guaranteed available by the `#[target_feature]` gate above.
    unsafe { _svqrdmlsh_s16(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    // Scalar (`_n_`) form: splat `op3` across all lanes, then reuse the vector form.
    svqrdmlsh_s16(op1, op2, svdup_n_s16(op3))
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    // Raw declaration of the LLVM SVE2 intrinsic, resolved by `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlsh.nxv4i32"
        )]
        fn _svqrdmlsh_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: sve2 is guaranteed available by the `#[target_feature]` gate above.
    unsafe { _svqrdmlsh_s32(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    // Scalar (`_n_`) form: splat `op3` across all lanes, then reuse the vector form.
    svqrdmlsh_s32(op1, op2, svdup_n_s32(op3))
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    // Raw declaration of the LLVM SVE2 intrinsic, resolved by `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmlsh.nxv2i64"
        )]
        fn _svqrdmlsh_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: sve2 is guaranteed available by the `#[target_feature]` gate above.
    unsafe { _svqrdmlsh_s64(op1, op2, op3) }
}
#[doc = "Saturating rounding doubling multiply-subtract high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
pub fn svqrdmlsh_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    // Scalar (`_n_`) form: splat `op3` across all lanes, then reuse the vector form.
    svqrdmlsh_s64(op1, op2, svdup_n_s64(op3))
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))]
pub fn svqrdmulh_lane_s16<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // 16-bit elements: 8 lanes per 128-bit vector segment, so 0..=7 are valid.
    static_assert_range!(IMM_INDEX, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv8i16"
        )]
        fn _svqrdmulh_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t;
    }
    // SAFETY: sve2 is guaranteed by `#[target_feature]`; the lane index was
    // range-checked at compile time above.
    unsafe { _svqrdmulh_lane_s16(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))]
pub fn svqrdmulh_lane_s32<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // 32-bit elements: 4 lanes per 128-bit vector segment, so 0..=3 are valid.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv4i32"
        )]
        fn _svqrdmulh_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t;
    }
    // SAFETY: sve2 is guaranteed by `#[target_feature]`; the lane index was
    // range-checked at compile time above.
    unsafe { _svqrdmulh_lane_s32(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))]
pub fn svqrdmulh_lane_s64<const IMM_INDEX: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // 64-bit elements: 2 lanes per 128-bit vector segment, so 0..=1 are valid.
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv2i64"
        )]
        fn _svqrdmulh_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t;
    }
    // SAFETY: sve2 is guaranteed by `#[target_feature]`; the lane index was
    // range-checked at compile time above.
    unsafe { _svqrdmulh_lane_s64(op1, op2, IMM_INDEX) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Raw declaration of the LLVM SVE2 intrinsic, resolved by `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.nxv16i8"
        )]
        fn _svqrdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: sve2 is guaranteed available by the `#[target_feature]` gate above.
    unsafe { _svqrdmulh_s8(op1, op2) }
}
13553#[doc = "Saturating rounding doubling multiply high"]
13554#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s8])"]
13555#[inline(always)]
13556#[target_feature(enable = "sve,sve2")]
13557#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13558#[cfg_attr(test, assert_instr(sqrdmulh))]
13559pub fn svqrdmulh_n_s8(op1: svint8_t, op2: i8) -> svint8_t {
13560 svqrdmulh_s8(op1, svdup_n_s8(op2))
13561}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Raw declaration of the LLVM SVE2 intrinsic, resolved by `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.nxv8i16"
        )]
        fn _svqrdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: sve2 is guaranteed available by the `#[target_feature]` gate above.
    unsafe { _svqrdmulh_s16(op1, op2) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_n_s16(op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar (`_n_`) form: splat `op2` across all lanes, then reuse the vector form.
    svqrdmulh_s16(op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Raw declaration of the LLVM SVE2 intrinsic, resolved by `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.nxv4i32"
        )]
        fn _svqrdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: sve2 is guaranteed available by the `#[target_feature]` gate above.
    unsafe { _svqrdmulh_s32(op1, op2) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_n_s32(op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar (`_n_`) form: splat `op2` across all lanes, then reuse the vector form.
    svqrdmulh_s32(op1, svdup_n_s32(op2))
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Raw declaration of the LLVM SVE2 intrinsic, resolved by `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrdmulh.nxv2i64"
        )]
        fn _svqrdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: sve2 is guaranteed available by the `#[target_feature]` gate above.
    unsafe { _svqrdmulh_s64(op1, op2) }
}
#[doc = "Saturating rounding doubling multiply high"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
pub fn svqrdmulh_n_s64(op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar (`_n_`) form: splat `op2` across all lanes, then reuse the vector form.
    svqrdmulh_s64(op1, svdup_n_s64(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Merging (`_m`) form; the `_x` and `_z` variants below delegate to this one.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv16i8")]
        fn _svqrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: sve2 is guaranteed available by the `#[target_feature]` gate above.
    unsafe { _svqrshl_s8_m(pg, op1, op2) }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // "Don't care" (`_x`) form: implemented via the merging form.
    svqrshl_s8_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Zeroing (`_z`) form: zero the inactive lanes of `op1` via `svsel`,
    // then apply the merging form.
    svqrshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Merging (`_m`) form; the `_x` and `_z` variants below delegate to this one.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv8i16")]
        fn _svqrshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: sve2 is guaranteed by `#[target_feature]`; `sve_into()` converts
    // the predicate to the `svbool8_t` the intrinsic declaration expects.
    unsafe { _svqrshl_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // "Don't care" (`_x`) form: implemented via the merging form.
    svqrshl_s16_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Zeroing (`_z`) form: zero the inactive lanes of `op1` via `svsel`,
    // then apply the merging form.
    svqrshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Merging (`_m`) form; the `_x` and `_z` variants below delegate to this one.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv4i32")]
        fn _svqrshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: sve2 is guaranteed by `#[target_feature]`; `sve_into()` converts
    // the predicate to the `svbool4_t` the intrinsic declaration expects.
    unsafe { _svqrshl_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // "Don't care" (`_x`) form: implemented via the merging form.
    svqrshl_s32_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Zeroing (`_z`) form: zero the inactive lanes of `op1` via `svsel`,
    // then apply the merging form.
    svqrshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Merging (`_m`) form; the `_x` and `_z` variants below delegate to this one.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv2i64")]
        fn _svqrshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: sve2 is guaranteed by `#[target_feature]`; `sve_into()` converts
    // the predicate to the `svbool2_t` the intrinsic declaration expects.
    unsafe { _svqrshl_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // "Don't care" (`_x`) form: implemented via the merging form.
    svqrshl_s64_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Zeroing (`_z`) form: zero the inactive lanes of `op1` via `svsel`,
    // then apply the merging form.
    svqrshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshl))]
pub fn svqrshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    // Merging (`_m`) form; note the shift amount `op2` stays signed.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv16i8")]
        fn _svqrshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: sve2 is guaranteed by `#[target_feature]`. The LLVM binding is
    // declared on signed vectors, so `op1` is reinterpreted in and the result
    // reinterpreted back out.
    unsafe { _svqrshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_u8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    // "Don't care" (`_x`) form: implemented via the merging form.
    svqrshl_u8_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_u8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    // Zeroing (`_z`) form: zero the inactive lanes of `op1` via `svsel`,
    // then apply the merging form.
    svqrshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_u8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    // Merging (`_m`) form; note the shift amount `op2` stays signed.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv8i16")]
        fn _svqrshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: sve2 is guaranteed by `#[target_feature]`. `sve_into()` converts
    // the predicate to `svbool8_t`; the LLVM binding is declared on signed
    // vectors, so `op1` is reinterpreted in and the result back out.
    unsafe { _svqrshl_u16_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_u16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    // "Don't care" (`_x`) form: implemented via the merging form.
    svqrshl_u16_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
pub fn svqrshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    // Scalar shift amount: splat into a vector and reuse the vector form.
    svqrshl_u16_x(pg, op1, svdup_n_s16(op2))
}
13967#[doc = "Saturating rounding shift left"]
13968#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_z)"]
13969#[inline(always)]
13970#[target_feature(enable = "sve,sve2")]
13971#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13972#[cfg_attr(test, assert_instr(uqrshl))]
13973pub fn svqrshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
13974 svqrshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
13975}
13976#[doc = "Saturating rounding shift left"]
13977#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_z)"]
13978#[inline(always)]
13979#[target_feature(enable = "sve,sve2")]
13980#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13981#[cfg_attr(test, assert_instr(uqrshl))]
13982pub fn svqrshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
13983 svqrshl_u16_z(pg, op1, svdup_n_s16(op2))
13984}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
// `_m` (merging) form: lowers directly to the LLVM `uqrshl` intrinsic.
pub fn svqrshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv4i32")]
        fn _svqrshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: only bitcast-style conversions are applied to fit the intrinsic
    // signature: `sve_into` converts the predicate to the svbool4_t form used
    // by the nxv4i32 intrinsic, and `as_signed`/`as_unsigned` reinterpret lanes.
    unsafe { _svqrshl_u32_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
// `_n_` (scalar-operand) form: splat `op2` and defer to the vector variant.
pub fn svqrshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    svqrshl_u32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
// `_x` (don't-care) form: inactive lanes may hold any value, so the merging
// variant is reused as-is.
pub fn svqrshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    svqrshl_u32_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
// `_n_` (scalar-operand) form of the don't-care variant.
pub fn svqrshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    svqrshl_u32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
// `_z` (zeroing) form: zero the inactive lanes of `op1` before merging.
pub fn svqrshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    svqrshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
// `_n_` (scalar-operand) form of the zeroing variant.
pub fn svqrshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    svqrshl_u32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
// `_m` (merging) form: lowers directly to the LLVM `uqrshl` intrinsic.
pub fn svqrshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv2i64")]
        fn _svqrshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: only bitcast-style conversions are applied to fit the intrinsic
    // signature: `sve_into` converts the predicate to the svbool2_t form used
    // by the nxv2i64 intrinsic, and `as_signed`/`as_unsigned` reinterpret lanes.
    unsafe { _svqrshl_u64_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
// `_n_` (scalar-operand) form: splat `op2` and defer to the vector variant.
pub fn svqrshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
    svqrshl_u64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
// `_x` (don't-care) form: inactive lanes may hold any value, so the merging
// variant is reused as-is.
pub fn svqrshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
    svqrshl_u64_m(pg, op1, op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
// `_n_` (scalar-operand) form of the don't-care variant.
pub fn svqrshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
    svqrshl_u64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
// `_z` (zeroing) form: zero the inactive lanes of `op1` before merging.
pub fn svqrshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
    svqrshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshl))]
// `_n_` (scalar-operand) form of the zeroing variant.
pub fn svqrshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
    svqrshl_u64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))]
pub fn svqrshrnb_n_s16<const IMM2: i32>(op1: svint16_t) -> svint8_t {
    // The immediate shift must fit the narrowed (8-bit) element width.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrshrnb.nxv8i16"
        )]
        fn _svqrshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: IMM2 has been range-checked above; the call maps 1:1 to the
    // LLVM intrinsic.
    unsafe { _svqrshrnb_n_s16(op1, IMM2) }
}
#[doc = "Saturating rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))]
pub fn svqrshrnb_n_s32<const IMM2: i32>(op1: svint32_t) -> svint16_t {
    // The immediate shift must fit the narrowed (16-bit) element width.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrshrnb.nxv4i32"
        )]
        fn _svqrshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: IMM2 has been range-checked above; the call maps 1:1 to the
    // LLVM intrinsic.
    unsafe { _svqrshrnb_n_s32(op1, IMM2) }
}
#[doc = "Saturating rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))]
pub fn svqrshrnb_n_s64<const IMM2: i32>(op1: svint64_t) -> svint32_t {
    // The immediate shift must fit the narrowed (32-bit) element width.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrshrnb.nxv2i64"
        )]
        fn _svqrshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: IMM2 has been range-checked above; the call maps 1:1 to the
    // LLVM intrinsic.
    unsafe { _svqrshrnb_n_s64(op1, IMM2) }
}
#[doc = "Saturating rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))]
pub fn svqrshrnb_n_u16<const IMM2: i32>(op1: svuint16_t) -> svuint8_t {
    // The immediate shift must fit the narrowed (8-bit) element width.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqrshrnb.nxv8i16"
        )]
        fn _svqrshrnb_n_u16(op1: svint16_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: IMM2 is range-checked above; `as_signed`/`as_unsigned` are lane
    // reinterpretations to fit the signless LLVM intrinsic signature.
    unsafe { _svqrshrnb_n_u16(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Saturating rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))]
pub fn svqrshrnb_n_u32<const IMM2: i32>(op1: svuint32_t) -> svuint16_t {
    // The immediate shift must fit the narrowed (16-bit) element width.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqrshrnb.nxv4i32"
        )]
        fn _svqrshrnb_n_u32(op1: svint32_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: IMM2 is range-checked above; `as_signed`/`as_unsigned` are lane
    // reinterpretations to fit the signless LLVM intrinsic signature.
    unsafe { _svqrshrnb_n_u32(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Saturating rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))]
pub fn svqrshrnb_n_u64<const IMM2: i32>(op1: svuint64_t) -> svuint32_t {
    // The immediate shift must fit the narrowed (32-bit) element width.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqrshrnb.nxv2i64"
        )]
        fn _svqrshrnb_n_u64(op1: svint64_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: IMM2 is range-checked above; `as_signed`/`as_unsigned` are lane
    // reinterpretations to fit the signless LLVM intrinsic signature.
    unsafe { _svqrshrnb_n_u64(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Saturating rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))]
// "Top" narrowing form: `even` supplies the lanes that this operation does not
// write, as reflected by the extra first operand of the LLVM intrinsic.
pub fn svqrshrnt_n_s16<const IMM2: i32>(even: svint8_t, op1: svint16_t) -> svint8_t {
    // The immediate shift must fit the narrowed (8-bit) element width.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrshrnt.nxv8i16"
        )]
        fn _svqrshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: IMM2 has been range-checked above; the call maps 1:1 to the
    // LLVM intrinsic.
    unsafe { _svqrshrnt_n_s16(even, op1, IMM2) }
}
#[doc = "Saturating rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))]
pub fn svqrshrnt_n_s32<const IMM2: i32>(even: svint16_t, op1: svint32_t) -> svint16_t {
    // The immediate shift must fit the narrowed (16-bit) element width.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrshrnt.nxv4i32"
        )]
        fn _svqrshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: IMM2 has been range-checked above; the call maps 1:1 to the
    // LLVM intrinsic.
    unsafe { _svqrshrnt_n_s32(even, op1, IMM2) }
}
#[doc = "Saturating rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))]
pub fn svqrshrnt_n_s64<const IMM2: i32>(even: svint32_t, op1: svint64_t) -> svint32_t {
    // The immediate shift must fit the narrowed (32-bit) element width.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrshrnt.nxv2i64"
        )]
        fn _svqrshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: IMM2 has been range-checked above; the call maps 1:1 to the
    // LLVM intrinsic.
    unsafe { _svqrshrnt_n_s64(even, op1, IMM2) }
}
#[doc = "Saturating rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))]
// "Top" narrowing form: `even` supplies the lanes that this operation does not
// write, as reflected by the extra first operand of the LLVM intrinsic.
pub fn svqrshrnt_n_u16<const IMM2: i32>(even: svuint8_t, op1: svuint16_t) -> svuint8_t {
    // The immediate shift must fit the narrowed (8-bit) element width.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqrshrnt.nxv8i16"
        )]
        fn _svqrshrnt_n_u16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: IMM2 is range-checked above; `as_signed`/`as_unsigned` are lane
    // reinterpretations to fit the signless LLVM intrinsic signature.
    unsafe { _svqrshrnt_n_u16(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Saturating rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))]
pub fn svqrshrnt_n_u32<const IMM2: i32>(even: svuint16_t, op1: svuint32_t) -> svuint16_t {
    // The immediate shift must fit the narrowed (16-bit) element width.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqrshrnt.nxv4i32"
        )]
        fn _svqrshrnt_n_u32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: IMM2 is range-checked above; `as_signed`/`as_unsigned` are lane
    // reinterpretations to fit the signless LLVM intrinsic signature.
    unsafe { _svqrshrnt_n_u32(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Saturating rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))]
pub fn svqrshrnt_n_u64<const IMM2: i32>(even: svuint32_t, op1: svuint64_t) -> svuint32_t {
    // The immediate shift must fit the narrowed (32-bit) element width.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqrshrnt.nxv2i64"
        )]
        fn _svqrshrnt_n_u64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: IMM2 is range-checked above; `as_signed`/`as_unsigned` are lane
    // reinterpretations to fit the signless LLVM intrinsic signature.
    unsafe { _svqrshrnt_n_u64(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Saturating rounding shift right unsigned narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))]
// Signed input, unsigned narrowed output: the intrinsic's signed return value
// is reinterpreted via `as_unsigned`.
pub fn svqrshrunb_n_s16<const IMM2: i32>(op1: svint16_t) -> svuint8_t {
    // The immediate shift must fit the narrowed (8-bit) element width.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrshrunb.nxv8i16"
        )]
        fn _svqrshrunb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: IMM2 has been range-checked above; `as_unsigned` is a lane
    // reinterpretation of the intrinsic's result.
    unsafe { _svqrshrunb_n_s16(op1, IMM2).as_unsigned() }
}
#[doc = "Saturating rounding shift right unsigned narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))]
pub fn svqrshrunb_n_s32<const IMM2: i32>(op1: svint32_t) -> svuint16_t {
    // The immediate shift must fit the narrowed (16-bit) element width.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrshrunb.nxv4i32"
        )]
        fn _svqrshrunb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: IMM2 has been range-checked above; `as_unsigned` is a lane
    // reinterpretation of the intrinsic's result.
    unsafe { _svqrshrunb_n_s32(op1, IMM2).as_unsigned() }
}
#[doc = "Saturating rounding shift right unsigned narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))]
pub fn svqrshrunb_n_s64<const IMM2: i32>(op1: svint64_t) -> svuint32_t {
    // The immediate shift must fit the narrowed (32-bit) element width.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrshrunb.nxv2i64"
        )]
        fn _svqrshrunb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: IMM2 has been range-checked above; `as_unsigned` is a lane
    // reinterpretation of the intrinsic's result.
    unsafe { _svqrshrunb_n_s64(op1, IMM2).as_unsigned() }
}
#[doc = "Saturating rounding shift right unsigned narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))]
// "Top" form with signed input and unsigned narrowed output; `even` supplies
// the lanes this operation does not write.
pub fn svqrshrunt_n_s16<const IMM2: i32>(even: svuint8_t, op1: svint16_t) -> svuint8_t {
    // The immediate shift must fit the narrowed (8-bit) element width.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrshrunt.nxv8i16"
        )]
        fn _svqrshrunt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: IMM2 is range-checked above; `as_signed`/`as_unsigned` are lane
    // reinterpretations to fit the LLVM intrinsic signature.
    unsafe { _svqrshrunt_n_s16(even.as_signed(), op1, IMM2).as_unsigned() }
}
#[doc = "Saturating rounding shift right unsigned narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))]
pub fn svqrshrunt_n_s32<const IMM2: i32>(even: svuint16_t, op1: svint32_t) -> svuint16_t {
    // The immediate shift must fit the narrowed (16-bit) element width.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrshrunt.nxv4i32"
        )]
        fn _svqrshrunt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: IMM2 is range-checked above; `as_signed`/`as_unsigned` are lane
    // reinterpretations to fit the LLVM intrinsic signature.
    unsafe { _svqrshrunt_n_s32(even.as_signed(), op1, IMM2).as_unsigned() }
}
#[doc = "Saturating rounding shift right unsigned narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))]
pub fn svqrshrunt_n_s64<const IMM2: i32>(even: svuint32_t, op1: svint64_t) -> svuint32_t {
    // The immediate shift must fit the narrowed (32-bit) element width.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqrshrunt.nxv2i64"
        )]
        fn _svqrshrunt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: IMM2 is range-checked above; `as_signed`/`as_unsigned` are lane
    // reinterpretations to fit the LLVM intrinsic signature.
    unsafe { _svqrshrunt_n_s64(even.as_signed(), op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_m` (merging) form: lowers directly to the LLVM `sqshl` intrinsic. No
// predicate conversion is needed here: the nxv16i8 intrinsic takes the
// full byte-element predicate (svbool_t) as-is.
pub fn svqshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv16i8")]
        fn _svqshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: 1:1 call into the LLVM intrinsic with no conversions.
    unsafe { _svqshl_s8_m(pg, op1, op2) }
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_n_` (scalar-operand) form: splat `op2` and defer to the vector variant.
pub fn svqshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svqshl_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_x` (don't-care) form: inactive lanes may hold any value, so the merging
// variant is reused as-is.
pub fn svqshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    svqshl_s8_m(pg, op1, op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_n_` (scalar-operand) form of the don't-care variant.
pub fn svqshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svqshl_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_z` (zeroing) form: zero the inactive lanes of `op1` before merging.
pub fn svqshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    svqshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_n_` (scalar-operand) form of the zeroing variant.
pub fn svqshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svqshl_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_m` (merging) form: lowers directly to the LLVM `sqshl` intrinsic.
pub fn svqshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv8i16")]
        fn _svqshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: `sve_into` converts the predicate to the svbool8_t form used by
    // the nxv8i16 intrinsic; the data operands are passed through unchanged.
    unsafe { _svqshl_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_n_` (scalar-operand) form: splat `op2` and defer to the vector variant.
pub fn svqshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svqshl_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_x` (don't-care) form: inactive lanes may hold any value, so the merging
// variant is reused as-is.
pub fn svqshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    svqshl_s16_m(pg, op1, op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_n_` (scalar-operand) form of the don't-care variant.
pub fn svqshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svqshl_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_z` (zeroing) form: zero the inactive lanes of `op1` before merging.
pub fn svqshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    svqshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_n_` (scalar-operand) form of the zeroing variant.
pub fn svqshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svqshl_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_m` (merging) form: lowers directly to the LLVM `sqshl` intrinsic.
pub fn svqshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv4i32")]
        fn _svqshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: `sve_into` converts the predicate to the svbool4_t form used by
    // the nxv4i32 intrinsic; the data operands are passed through unchanged.
    unsafe { _svqshl_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_n_` (scalar-operand) form: splat `op2` and defer to the vector variant.
pub fn svqshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svqshl_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_x` (don't-care) form: inactive lanes may hold any value, so the merging
// variant is reused as-is.
pub fn svqshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    svqshl_s32_m(pg, op1, op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_n_` (scalar-operand) form of the don't-care variant.
pub fn svqshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svqshl_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_z` (zeroing) form: zero the inactive lanes of `op1` before merging.
pub fn svqshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    svqshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_n_` (scalar-operand) form of the zeroing variant.
pub fn svqshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svqshl_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// Merging form (`_m`): lanes where `pg` is inactive keep their value from `op1`.
pub fn svqshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv2i64")]
        fn _svqshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: direct call to the SVE2 LLVM intrinsic declared above; the
    // required "sve,sve2" target features are enabled on this function.
    // `sve_into()` converts the generic predicate into the 2-lane predicate
    // type that the nxv2i64 intrinsic expects.
    unsafe { _svqshl_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_n` form: splats the scalar shift amount and defers to the vector variant.
pub fn svqshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svqshl_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// "Don't care" form (`_x`): implemented via the merging form, so inactive
// lanes in practice take their value from `op1`.
pub fn svqshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svqshl_s64_m(pg, op1, op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_n` form: splats the scalar shift amount and defers to the vector variant.
pub fn svqshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svqshl_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// Zeroing form (`_z`): inactive lanes of `op1` are first cleared to zero via
// `svsel`, then the merging form is applied.
pub fn svqshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svqshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshl))]
// `_n` form: splats the scalar shift amount and defers to the vector variant.
pub fn svqshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svqshl_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// Merging form (`_m`): lanes where `pg` is inactive keep their value from `op1`.
// Note `op2` (the per-lane shift amount) is signed even for the unsigned op.
pub fn svqshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv16i8")]
        fn _svqshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: direct call to the SVE2 LLVM intrinsic declared above; the
    // required "sve,sve2" target features are enabled on this function. The
    // 16-lane predicate is the generic `svbool_t`, so no conversion is needed.
    // The LLVM binding is declared over signed vectors, hence the
    // `as_signed`/`as_unsigned` bitcasts around the call.
    unsafe { _svqshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// `_n` form: splats the scalar shift amount and defers to the vector variant.
pub fn svqshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    svqshl_u8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// "Don't care" form (`_x`): implemented via the merging form, so inactive
// lanes in practice take their value from `op1`.
pub fn svqshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    svqshl_u8_m(pg, op1, op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// `_n` form: splats the scalar shift amount and defers to the vector variant.
pub fn svqshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    svqshl_u8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// Zeroing form (`_z`): inactive lanes of `op1` are first cleared to zero via
// `svsel`, then the merging form is applied.
pub fn svqshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    svqshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// `_n` form: splats the scalar shift amount and defers to the vector variant.
pub fn svqshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    svqshl_u8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// Merging form (`_m`): lanes where `pg` is inactive keep their value from `op1`.
// Note `op2` (the per-lane shift amount) is signed even for the unsigned op.
pub fn svqshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv8i16")]
        fn _svqshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: direct call to the SVE2 LLVM intrinsic declared above; the
    // required "sve,sve2" target features are enabled on this function.
    // `sve_into()` converts the generic predicate into the 8-lane predicate
    // type; the signed/unsigned bitcasts adapt to the signed LLVM binding.
    unsafe { _svqshl_u16_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// `_n` form: splats the scalar shift amount and defers to the vector variant.
pub fn svqshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    svqshl_u16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// "Don't care" form (`_x`): implemented via the merging form, so inactive
// lanes in practice take their value from `op1`.
pub fn svqshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    svqshl_u16_m(pg, op1, op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// `_n` form: splats the scalar shift amount and defers to the vector variant.
pub fn svqshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    svqshl_u16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// Zeroing form (`_z`): inactive lanes of `op1` are first cleared to zero via
// `svsel`, then the merging form is applied.
pub fn svqshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    svqshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// `_n` form: splats the scalar shift amount and defers to the vector variant.
pub fn svqshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    svqshl_u16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// Merging form (`_m`): lanes where `pg` is inactive keep their value from `op1`.
// Note `op2` (the per-lane shift amount) is signed even for the unsigned op.
pub fn svqshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv4i32")]
        fn _svqshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: direct call to the SVE2 LLVM intrinsic declared above; the
    // required "sve,sve2" target features are enabled on this function.
    // `sve_into()` converts the generic predicate into the 4-lane predicate
    // type; the signed/unsigned bitcasts adapt to the signed LLVM binding.
    unsafe { _svqshl_u32_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// `_n` form: splats the scalar shift amount and defers to the vector variant.
pub fn svqshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    svqshl_u32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// "Don't care" form (`_x`): implemented via the merging form, so inactive
// lanes in practice take their value from `op1`.
pub fn svqshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    svqshl_u32_m(pg, op1, op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// `_n` form: splats the scalar shift amount and defers to the vector variant.
pub fn svqshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    svqshl_u32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// Zeroing form (`_z`): inactive lanes of `op1` are first cleared to zero via
// `svsel`, then the merging form is applied.
pub fn svqshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    svqshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// `_n` form: splats the scalar shift amount and defers to the vector variant.
pub fn svqshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    svqshl_u32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// Merging form (`_m`): lanes where `pg` is inactive keep their value from `op1`.
// Note `op2` (the per-lane shift amount) is signed even for the unsigned op.
pub fn svqshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv2i64")]
        fn _svqshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: direct call to the SVE2 LLVM intrinsic declared above; the
    // required "sve,sve2" target features are enabled on this function.
    // `sve_into()` converts the generic predicate into the 2-lane predicate
    // type; the signed/unsigned bitcasts adapt to the signed LLVM binding.
    unsafe { _svqshl_u64_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// `_n` form: splats the scalar shift amount and defers to the vector variant.
pub fn svqshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
    svqshl_u64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// "Don't care" form (`_x`): implemented via the merging form, so inactive
// lanes in practice take their value from `op1`.
pub fn svqshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
    svqshl_u64_m(pg, op1, op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// `_n` form: splats the scalar shift amount and defers to the vector variant.
pub fn svqshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
    svqshl_u64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// Zeroing form (`_z`): inactive lanes of `op1` are first cleared to zero via
// `svsel`, then the merging form is applied.
pub fn svqshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
    svqshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshl))]
// `_n` form: splats the scalar shift amount and defers to the vector variant.
pub fn svqshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
    svqshl_u64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
// Merging form (`_m`): inactive lanes keep their (reinterpreted) `op1` value.
// `IMM2` is a compile-time shift amount, restricted to the 8-bit lane width.
pub fn svqshlu_n_s8_m<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
    static_assert_range!(IMM2, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv16i8")]
        fn _svqshlu_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: direct call to the SVE2 LLVM intrinsic declared above; IMM2 was
    // validated by the static assert and "sve,sve2" is enabled on this fn.
    unsafe { _svqshlu_n_s8_m(pg, op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
// "Don't care" form (`_x`): implemented via the merging form.
pub fn svqshlu_n_s8_x<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
    svqshlu_n_s8_m::<IMM2>(pg, op1)
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
// Zeroing form (`_z`): inactive lanes of `op1` are cleared to zero via
// `svsel` before applying the merging form.
pub fn svqshlu_n_s8_z<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
    svqshlu_n_s8_m::<IMM2>(pg, svsel_s8(pg, op1, svdup_n_s8(0)))
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
// Merging form (`_m`): inactive lanes keep their (reinterpreted) `op1` value.
// `IMM2` is a compile-time shift amount, restricted to the 16-bit lane width.
pub fn svqshlu_n_s16_m<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
    static_assert_range!(IMM2, 0..=15);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv8i16")]
        fn _svqshlu_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: direct call to the SVE2 LLVM intrinsic declared above; IMM2 was
    // validated by the static assert and "sve,sve2" is enabled on this fn.
    // `sve_into()` converts the predicate to the 8-lane type.
    unsafe { _svqshlu_n_s16_m(pg.sve_into(), op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
// "Don't care" form (`_x`): implemented via the merging form.
pub fn svqshlu_n_s16_x<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
    svqshlu_n_s16_m::<IMM2>(pg, op1)
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
// Zeroing form (`_z`): inactive lanes of `op1` are cleared to zero via
// `svsel` before applying the merging form.
pub fn svqshlu_n_s16_z<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
    svqshlu_n_s16_m::<IMM2>(pg, svsel_s16(pg, op1, svdup_n_s16(0)))
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
// Merging form (`_m`): inactive lanes keep their (reinterpreted) `op1` value.
// `IMM2` is a compile-time shift amount, restricted to the 32-bit lane width.
pub fn svqshlu_n_s32_m<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
    static_assert_range!(IMM2, 0..=31);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv4i32")]
        fn _svqshlu_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: direct call to the SVE2 LLVM intrinsic declared above; IMM2 was
    // validated by the static assert and "sve,sve2" is enabled on this fn.
    // `sve_into()` converts the predicate to the 4-lane type.
    unsafe { _svqshlu_n_s32_m(pg.sve_into(), op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
// "Don't care" form (`_x`): implemented via the merging form.
pub fn svqshlu_n_s32_x<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
    svqshlu_n_s32_m::<IMM2>(pg, op1)
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
// Zeroing form (`_z`): inactive lanes of `op1` are cleared to zero via
// `svsel` before applying the merging form.
pub fn svqshlu_n_s32_z<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
    svqshlu_n_s32_m::<IMM2>(pg, svsel_s32(pg, op1, svdup_n_s32(0)))
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
// Merging form (`_m`): inactive lanes keep their (reinterpreted) `op1` value.
// `IMM2` is a compile-time shift amount, restricted to the 64-bit lane width.
pub fn svqshlu_n_s64_m<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svuint64_t {
    static_assert_range!(IMM2, 0..=63);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv2i64")]
        fn _svqshlu_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t;
    }
    // SAFETY: direct call to the SVE2 LLVM intrinsic declared above; IMM2 was
    // validated by the static assert and "sve,sve2" is enabled on this fn.
    // `sve_into()` converts the predicate to the 2-lane type.
    unsafe { _svqshlu_n_s64_m(pg.sve_into(), op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
// "Don't care" form (`_x`): implemented via the merging form.
pub fn svqshlu_n_s64_x<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svuint64_t {
    svqshlu_n_s64_m::<IMM2>(pg, op1)
}
#[doc = "Saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
// Zeroing form (`_z`): inactive lanes of `op1` are cleared to zero via
// `svsel` before applying the merging form.
pub fn svqshlu_n_s64_z<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svuint64_t {
    svqshlu_n_s64_m::<IMM2>(pg, svsel_s64(pg, op1, svdup_n_s64(0)))
}
#[doc = "Saturating shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))]
// Shifts each 16-bit lane right by the compile-time immediate `IMM2` and
// saturates the result into an 8-bit lane (signed -> signed narrowing).
pub fn svqshrnb_n_s16<const IMM2: i32>(op1: svint16_t) -> svint8_t {
    // The shift amount must fit the narrowed (half-width) element: 1..=8.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrnb.nxv8i16"
        )]
        fn _svqshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: calls the SVE2 LLVM intrinsic declared above with a
    // statically-validated immediate; "sve,sve2" is enabled on this fn.
    unsafe { _svqshrnb_n_s16(op1, IMM2) }
}
#[doc = "Saturating shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))]
// 32-bit -> 16-bit signed saturating narrowing right shift by `IMM2`.
pub fn svqshrnb_n_s32<const IMM2: i32>(op1: svint32_t) -> svint16_t {
    // The shift amount must fit the narrowed (half-width) element: 1..=16.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrnb.nxv4i32"
        )]
        fn _svqshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: calls the SVE2 LLVM intrinsic declared above with a
    // statically-validated immediate; "sve,sve2" is enabled on this fn.
    unsafe { _svqshrnb_n_s32(op1, IMM2) }
}
#[doc = "Saturating shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))]
// 64-bit -> 32-bit signed saturating narrowing right shift by `IMM2`.
pub fn svqshrnb_n_s64<const IMM2: i32>(op1: svint64_t) -> svint32_t {
    // The shift amount must fit the narrowed (half-width) element: 1..=32.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrnb.nxv2i64"
        )]
        fn _svqshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: calls the SVE2 LLVM intrinsic declared above with a
    // statically-validated immediate; "sve,sve2" is enabled on this fn.
    unsafe { _svqshrnb_n_s64(op1, IMM2) }
}
#[doc = "Saturating shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))]
// Unsigned variant: 16-bit -> 8-bit saturating narrowing right shift by `IMM2`.
pub fn svqshrnb_n_u16<const IMM2: i32>(op1: svuint16_t) -> svuint8_t {
    // The shift amount must fit the narrowed (half-width) element: 1..=8.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqshrnb.nxv8i16"
        )]
        fn _svqshrnb_n_u16(op1: svint16_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: calls the SVE2 LLVM intrinsic declared above; the binding is
    // declared over signed vectors, so bitcast unsigned data in and back out.
    unsafe { _svqshrnb_n_u16(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Saturating shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))]
// Unsigned variant: 32-bit -> 16-bit saturating narrowing right shift by `IMM2`.
pub fn svqshrnb_n_u32<const IMM2: i32>(op1: svuint32_t) -> svuint16_t {
    // The shift amount must fit the narrowed (half-width) element: 1..=16.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqshrnb.nxv4i32"
        )]
        fn _svqshrnb_n_u32(op1: svint32_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: calls the SVE2 LLVM intrinsic declared above; the binding is
    // declared over signed vectors, so bitcast unsigned data in and back out.
    unsafe { _svqshrnb_n_u32(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Saturating shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))]
// Unsigned variant: 64-bit -> 32-bit saturating narrowing right shift by `IMM2`.
pub fn svqshrnb_n_u64<const IMM2: i32>(op1: svuint64_t) -> svuint32_t {
    // The shift amount must fit the narrowed (half-width) element: 1..=32.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqshrnb.nxv2i64"
        )]
        fn _svqshrnb_n_u64(op1: svint64_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: calls the SVE2 LLVM intrinsic declared above; the binding is
    // declared over signed vectors, so bitcast unsigned data in and back out.
    unsafe { _svqshrnb_n_u64(op1.as_signed(), IMM2).as_unsigned() }
}
15101#[doc = "Saturating shift right narrow (top)"]
15102#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s16])"]
15103#[inline(always)]
15104#[target_feature(enable = "sve,sve2")]
15105#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15106#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))]
15107pub fn svqshrnt_n_s16<const IMM2: i32>(even: svint8_t, op1: svint16_t) -> svint8_t {
15108 static_assert_range!(IMM2, 1..=8);
15109 unsafe extern "unadjusted" {
15110 #[cfg_attr(
15111 target_arch = "aarch64",
15112 link_name = "llvm.aarch64.sve.sqshrnt.nxv8i16"
15113 )]
15114 fn _svqshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
15115 }
15116 unsafe { _svqshrnt_n_s16(even, op1, IMM2) }
15117}
15118#[doc = "Saturating shift right narrow (top)"]
15119#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s32])"]
15120#[inline(always)]
15121#[target_feature(enable = "sve,sve2")]
15122#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15123#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))]
15124pub fn svqshrnt_n_s32<const IMM2: i32>(even: svint16_t, op1: svint32_t) -> svint16_t {
15125 static_assert_range!(IMM2, 1..=16);
15126 unsafe extern "unadjusted" {
15127 #[cfg_attr(
15128 target_arch = "aarch64",
15129 link_name = "llvm.aarch64.sve.sqshrnt.nxv4i32"
15130 )]
15131 fn _svqshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
15132 }
15133 unsafe { _svqshrnt_n_s32(even, op1, IMM2) }
15134}
15135#[doc = "Saturating shift right narrow (top)"]
15136#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s64])"]
15137#[inline(always)]
15138#[target_feature(enable = "sve,sve2")]
15139#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15140#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))]
15141pub fn svqshrnt_n_s64<const IMM2: i32>(even: svint32_t, op1: svint64_t) -> svint32_t {
15142 static_assert_range!(IMM2, 1..=32);
15143 unsafe extern "unadjusted" {
15144 #[cfg_attr(
15145 target_arch = "aarch64",
15146 link_name = "llvm.aarch64.sve.sqshrnt.nxv2i64"
15147 )]
15148 fn _svqshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
15149 }
15150 unsafe { _svqshrnt_n_s64(even, op1, IMM2) }
15151}
15152#[doc = "Saturating shift right narrow (top)"]
15153#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u16])"]
15154#[inline(always)]
15155#[target_feature(enable = "sve,sve2")]
15156#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15157#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))]
15158pub fn svqshrnt_n_u16<const IMM2: i32>(even: svuint8_t, op1: svuint16_t) -> svuint8_t {
15159 static_assert_range!(IMM2, 1..=8);
15160 unsafe extern "unadjusted" {
15161 #[cfg_attr(
15162 target_arch = "aarch64",
15163 link_name = "llvm.aarch64.sve.uqshrnt.nxv8i16"
15164 )]
15165 fn _svqshrnt_n_u16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
15166 }
15167 unsafe { _svqshrnt_n_u16(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
15168}
#[doc = "Saturating shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))]
pub fn svqshrnt_n_u32<const IMM2: i32>(even: svuint16_t, op1: svuint32_t) -> svuint16_t {
    // Shift amount must be within 1..=16 (narrowing 32-bit to 16-bit lanes);
    // checked at compile time.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        // LLVM declares this intrinsic on signed vector types, hence the
        // `as_signed`/`as_unsigned` reinterpretation at the call site.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqshrnt.nxv4i32"
        )]
        fn _svqshrnt_n_u32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: direct call to the matching LLVM SVE2 intrinsic; `sve2` is
    // guaranteed by `#[target_feature]` and IMM2 was validated above.
    unsafe { _svqshrnt_n_u32(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Saturating shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))]
pub fn svqshrnt_n_u64<const IMM2: i32>(even: svuint32_t, op1: svuint64_t) -> svuint32_t {
    // Shift amount must be within 1..=32 (narrowing 64-bit to 32-bit lanes);
    // checked at compile time.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        // LLVM declares this intrinsic on signed vector types, hence the
        // `as_signed`/`as_unsigned` reinterpretation at the call site.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqshrnt.nxv2i64"
        )]
        fn _svqshrnt_n_u64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: direct call to the matching LLVM SVE2 intrinsic; `sve2` is
    // guaranteed by `#[target_feature]` and IMM2 was validated above.
    unsafe { _svqshrnt_n_u64(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Saturating shift right unsigned narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))]
pub fn svqshrunb_n_s16<const IMM2: i32>(op1: svint16_t) -> svuint8_t {
    // Shift amount must be within 1..=8 (narrowing 16-bit to 8-bit lanes);
    // checked at compile time.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        // LLVM returns a signed vector; the public API reinterprets it to the
        // unsigned result type with `as_unsigned`.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrunb.nxv8i16"
        )]
        fn _svqshrunb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: direct call to the matching LLVM SVE2 intrinsic; `sve2` is
    // guaranteed by `#[target_feature]` and IMM2 was validated above.
    unsafe { _svqshrunb_n_s16(op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift right unsigned narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))]
pub fn svqshrunb_n_s32<const IMM2: i32>(op1: svint32_t) -> svuint16_t {
    // Shift amount must be within 1..=16 (narrowing 32-bit to 16-bit lanes);
    // checked at compile time.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        // LLVM returns a signed vector; reinterpreted to unsigned below.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrunb.nxv4i32"
        )]
        fn _svqshrunb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: direct call to the matching LLVM SVE2 intrinsic; `sve2` is
    // guaranteed by `#[target_feature]` and IMM2 was validated above.
    unsafe { _svqshrunb_n_s32(op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift right unsigned narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))]
pub fn svqshrunb_n_s64<const IMM2: i32>(op1: svint64_t) -> svuint32_t {
    // Shift amount must be within 1..=32 (narrowing 64-bit to 32-bit lanes);
    // checked at compile time.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        // LLVM returns a signed vector; reinterpreted to unsigned below.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrunb.nxv2i64"
        )]
        fn _svqshrunb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: direct call to the matching LLVM SVE2 intrinsic; `sve2` is
    // guaranteed by `#[target_feature]` and IMM2 was validated above.
    unsafe { _svqshrunb_n_s64(op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift right unsigned narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))]
pub fn svqshrunt_n_s16<const IMM2: i32>(even: svuint8_t, op1: svint16_t) -> svuint8_t {
    // Shift amount must be within 1..=8 (narrowing 16-bit to 8-bit lanes);
    // checked at compile time.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        // LLVM declares `even` and the result on signed vectors, hence the
        // `as_signed`/`as_unsigned` reinterpretation at the call site.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrunt.nxv8i16"
        )]
        fn _svqshrunt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: direct call to the matching LLVM SVE2 intrinsic; `sve2` is
    // guaranteed by `#[target_feature]` and IMM2 was validated above.
    unsafe { _svqshrunt_n_s16(even.as_signed(), op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift right unsigned narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))]
pub fn svqshrunt_n_s32<const IMM2: i32>(even: svuint16_t, op1: svint32_t) -> svuint16_t {
    // Shift amount must be within 1..=16 (narrowing 32-bit to 16-bit lanes);
    // checked at compile time.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        // LLVM declares `even` and the result on signed vectors, hence the
        // `as_signed`/`as_unsigned` reinterpretation at the call site.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrunt.nxv4i32"
        )]
        fn _svqshrunt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: direct call to the matching LLVM SVE2 intrinsic; `sve2` is
    // guaranteed by `#[target_feature]` and IMM2 was validated above.
    unsafe { _svqshrunt_n_s32(even.as_signed(), op1, IMM2).as_unsigned() }
}
#[doc = "Saturating shift right unsigned narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))]
pub fn svqshrunt_n_s64<const IMM2: i32>(even: svuint32_t, op1: svint64_t) -> svuint32_t {
    // Shift amount must be within 1..=32 (narrowing 64-bit to 32-bit lanes);
    // checked at compile time.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        // LLVM declares `even` and the result on signed vectors, hence the
        // `as_signed`/`as_unsigned` reinterpretation at the call site.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqshrunt.nxv2i64"
        )]
        fn _svqshrunt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: direct call to the matching LLVM SVE2 intrinsic; `sve2` is
    // guaranteed by `#[target_feature]` and IMM2 was validated above.
    unsafe { _svqshrunt_n_s64(even.as_signed(), op1, IMM2).as_unsigned() }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        // 8-bit elements use the full-width predicate directly; no
        // predicate reinterpretation is needed for nxv16i8.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv16i8")]
        fn _svqsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: direct call to the matching LLVM SVE intrinsic; the required
    // target features are guaranteed by `#[target_feature]`.
    unsafe { _svqsub_s8_m(pg, op1, op2) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // `_x` form implemented by delegating to the merging (`_m`) variant.
    svqsub_s8_m(pg, op1, op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Zeroing form: inactive lanes of `op1` are replaced with 0 before
    // applying the merging variant.
    svqsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        // The nxv8i16 intrinsic takes a 16-bit-element predicate (`svbool8_t`);
        // `sve_into` performs that reinterpretation at the call site.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv8i16")]
        fn _svqsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: direct call to the matching LLVM SVE intrinsic; the required
    // target features are guaranteed by `#[target_feature]`.
    unsafe { _svqsub_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // `_x` form implemented by delegating to the merging (`_m`) variant.
    svqsub_s16_m(pg, op1, op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Zeroing form: inactive lanes of `op1` are replaced with 0 before
    // applying the merging variant.
    svqsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        // The nxv4i32 intrinsic takes a 32-bit-element predicate (`svbool4_t`);
        // `sve_into` performs that reinterpretation at the call site.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv4i32")]
        fn _svqsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: direct call to the matching LLVM SVE intrinsic; the required
    // target features are guaranteed by `#[target_feature]`.
    unsafe { _svqsub_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // `_x` form implemented by delegating to the merging (`_m`) variant.
    svqsub_s32_m(pg, op1, op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Zeroing form: inactive lanes of `op1` are replaced with 0 before
    // applying the merging variant.
    svqsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        // The nxv2i64 intrinsic takes a 64-bit-element predicate (`svbool2_t`);
        // `sve_into` performs that reinterpretation at the call site.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv2i64")]
        fn _svqsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: direct call to the matching LLVM SVE intrinsic; the required
    // target features are guaranteed by `#[target_feature]`.
    unsafe { _svqsub_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // `_x` form implemented by delegating to the merging (`_m`) variant.
    svqsub_s64_m(pg, op1, op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Zeroing form: inactive lanes of `op1` are replaced with 0 before
    // applying the merging variant.
    svqsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        // LLVM declares the `uqsub` intrinsic on signed vectors, hence the
        // `as_signed`/`as_unsigned` reinterpretation at the call site.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv16i8")]
        fn _svqsub_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: direct call to the matching LLVM SVE intrinsic; the required
    // target features are guaranteed by `#[target_feature]`.
    unsafe { _svqsub_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_u8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // `_x` form implemented by delegating to the merging (`_m`) variant.
    svqsub_u8_m(pg, op1, op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_u8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Zeroing form: inactive lanes of `op1` are replaced with 0 before
    // applying the merging variant.
    svqsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_u8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        // LLVM declares the intrinsic on signed vectors with a 16-bit-element
        // predicate; `sve_into` and `as_signed`/`as_unsigned` bridge the types.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv8i16")]
        fn _svqsub_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: direct call to the matching LLVM SVE intrinsic; the required
    // target features are guaranteed by `#[target_feature]`.
    unsafe { _svqsub_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // `_x` form implemented by delegating to the merging (`_m`) variant.
    svqsub_u16_m(pg, op1, op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Zeroing form: inactive lanes of `op1` are replaced with 0 before
    // applying the merging variant.
    svqsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_u16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        // LLVM declares the intrinsic on signed vectors with a 32-bit-element
        // predicate; `sve_into` and `as_signed`/`as_unsigned` bridge the types.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv4i32")]
        fn _svqsub_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: direct call to the matching LLVM SVE intrinsic; the required
    // target features are guaranteed by `#[target_feature]`.
    unsafe { _svqsub_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // `_x` form implemented by delegating to the merging (`_m`) variant.
    svqsub_u32_m(pg, op1, op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Zeroing form: inactive lanes of `op1` are replaced with 0 before
    // applying the merging variant.
    svqsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        // LLVM declares the intrinsic on signed vectors with a 64-bit-element
        // predicate; `sve_into` and `as_signed`/`as_unsigned` bridge the types.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv2i64")]
        fn _svqsub_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: direct call to the matching LLVM SVE intrinsic; the required
    // target features are guaranteed by `#[target_feature]`.
    unsafe { _svqsub_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // `_x` form implemented by delegating to the merging (`_m`) variant.
    svqsub_u64_m(pg, op1, op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Zeroing form: inactive lanes of `op1` are replaced with 0 before
    // applying the merging variant.
    svqsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsub_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        // 8-bit elements use the full-width predicate directly; no
        // predicate reinterpretation is needed for nxv16i8.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv16i8")]
        fn _svqsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: direct call to the matching LLVM SVE intrinsic; the required
    // target features are guaranteed by `#[target_feature]`.
    unsafe { _svqsubr_s8_m(pg, op1, op2) }
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar form: splat `op2` into every lane and reuse the vector variant.
    svqsubr_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // "Don't care" form: any value is acceptable in inactive lanes, so the
    // merging form is reused as-is.
    svqsubr_s8_m(pg, op1, op2)
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Zeroing form: force op1's inactive lanes to 0 first, so the merging
    // call leaves zeros in every inactive result lane.
    svqsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Merging form of the reversed saturating subtract for 16-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv8i16")]
        fn _svqsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: direct call into the matching LLVM intrinsic. The intrinsic's
    // signature takes an element-width predicate (`svbool8_t`), hence the
    // `sve_into()` conversion of the byte-granular `pg`.
    unsafe { _svqsubr_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // "Don't care" form: any value is acceptable in inactive lanes, so the
    // merging form is reused as-is.
    svqsubr_s16_m(pg, op1, op2)
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Zeroing form: force op1's inactive lanes to 0 first, so the merging
    // call leaves zeros in every inactive result lane.
    svqsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Merging form of the reversed saturating subtract for 32-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv4i32")]
        fn _svqsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: direct call into the matching LLVM intrinsic; `sve_into()`
    // narrows the byte predicate to the element-width `svbool4_t` it expects.
    unsafe { _svqsubr_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // "Don't care" form: any value is acceptable in inactive lanes, so the
    // merging form is reused as-is.
    svqsubr_s32_m(pg, op1, op2)
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Zeroing form: force op1's inactive lanes to 0 first, so the merging
    // call leaves zeros in every inactive result lane.
    svqsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Merging form of the reversed saturating subtract for 64-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv2i64")]
        fn _svqsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: direct call into the matching LLVM intrinsic; `sve_into()`
    // narrows the byte predicate to the element-width `svbool2_t` it expects.
    unsafe { _svqsubr_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // "Don't care" form: any value is acceptable in inactive lanes, so the
    // merging form is reused as-is.
    svqsubr_s64_m(pg, op1, op2)
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Zeroing form: force op1's inactive lanes to 0 first, so the merging
    // call leaves zeros in every inactive result lane.
    svqsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsubr))]
pub fn svqsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Merging form, unsigned 8-bit lanes. LLVM's signature is expressed in
    // signed vector types, so the operands/result are reinterpreted with
    // `as_signed`/`as_unsigned` around the call.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv16i8")]
        fn _svqsubr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: direct call into the matching LLVM SVE intrinsic.
    unsafe { _svqsubr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_u8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // "Don't care" form: any value is acceptable in inactive lanes, so the
    // merging form is reused as-is.
    svqsubr_u8_m(pg, op1, op2)
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_u8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Zeroing form: force op1's inactive lanes to 0 first, so the merging
    // call leaves zeros in every inactive result lane.
    svqsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_u8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Merging form, unsigned 16-bit lanes. The LLVM intrinsic uses signed
    // vector types and an element-width predicate, hence the `as_signed`/
    // `as_unsigned` reinterpretations and the `sve_into()` on `pg`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv8i16")]
        fn _svqsubr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: direct call into the matching LLVM SVE intrinsic.
    unsafe { _svqsubr_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // "Don't care" form: any value is acceptable in inactive lanes, so the
    // merging form is reused as-is.
    svqsubr_u16_m(pg, op1, op2)
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Zeroing form: force op1's inactive lanes to 0 first, so the merging
    // call leaves zeros in every inactive result lane.
    svqsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_u16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Merging form, unsigned 32-bit lanes. The LLVM intrinsic uses signed
    // vector types and an element-width predicate, hence the `as_signed`/
    // `as_unsigned` reinterpretations and the `sve_into()` on `pg`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv4i32")]
        fn _svqsubr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: direct call into the matching LLVM SVE intrinsic.
    unsafe { _svqsubr_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // "Don't care" form: any value is acceptable in inactive lanes, so the
    // merging form is reused as-is.
    svqsubr_u32_m(pg, op1, op2)
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Zeroing form: force op1's inactive lanes to 0 first, so the merging
    // call leaves zeros in every inactive result lane.
    svqsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Merging form, unsigned 64-bit lanes. The LLVM intrinsic uses signed
    // vector types and an element-width predicate, hence the `as_signed`/
    // `as_unsigned` reinterpretations and the `sve_into()` on `pg`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv2i64")]
        fn _svqsubr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: direct call into the matching LLVM SVE intrinsic.
    unsafe { _svqsubr_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // "Don't care" form: any value is acceptable in inactive lanes, so the
    // merging form is reused as-is.
    svqsubr_u64_m(pg, op1, op2)
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Zeroing form: force op1's inactive lanes to 0 first, so the merging
    // call leaves zeros in every inactive result lane.
    svqsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Saturating subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsubr))]
pub fn svqsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar variant: broadcast `op2` to every lane, then reuse the vector form.
    svqsubr_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Saturating extract narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqxtnb))]
pub fn svqxtnb_s16(op: svint16_t) -> svint8_t {
    // Saturating narrow of each 16-bit element to 8 bits, targeting the
    // even-numbered (bottom) destination lanes; see the doc link above.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv8i16")]
        fn _svqxtnb_s16(op: svint16_t) -> svint8_t;
    }
    // SAFETY: direct call into the matching LLVM SVE intrinsic.
    unsafe { _svqxtnb_s16(op) }
}
#[doc = "Saturating extract narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqxtnb))]
pub fn svqxtnb_s32(op: svint32_t) -> svint16_t {
    // Saturating narrow of each 32-bit element to 16 bits, targeting the
    // even-numbered (bottom) destination lanes; see the doc link above.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv4i32")]
        fn _svqxtnb_s32(op: svint32_t) -> svint16_t;
    }
    // SAFETY: direct call into the matching LLVM SVE intrinsic.
    unsafe { _svqxtnb_s32(op) }
}
#[doc = "Saturating extract narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqxtnb))]
pub fn svqxtnb_s64(op: svint64_t) -> svint32_t {
    // Saturating narrow of each 64-bit element to 32 bits, targeting the
    // even-numbered (bottom) destination lanes; see the doc link above.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv2i64")]
        fn _svqxtnb_s64(op: svint64_t) -> svint32_t;
    }
    // SAFETY: direct call into the matching LLVM SVE intrinsic.
    unsafe { _svqxtnb_s64(op) }
}
#[doc = "Saturating extract narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqxtnb))]
pub fn svqxtnb_u16(op: svuint16_t) -> svuint8_t {
    // Unsigned saturating narrow (bottom lanes). LLVM's signature is in
    // signed vector types, so reinterpret on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv8i16")]
        fn _svqxtnb_u16(op: svint16_t) -> svint8_t;
    }
    // SAFETY: direct call into the matching LLVM SVE intrinsic.
    unsafe { _svqxtnb_u16(op.as_signed()).as_unsigned() }
}
#[doc = "Saturating extract narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqxtnb))]
pub fn svqxtnb_u32(op: svuint32_t) -> svuint16_t {
    // Unsigned saturating narrow (bottom lanes). LLVM's signature is in
    // signed vector types, so reinterpret on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv4i32")]
        fn _svqxtnb_u32(op: svint32_t) -> svint16_t;
    }
    // SAFETY: direct call into the matching LLVM SVE intrinsic.
    unsafe { _svqxtnb_u32(op.as_signed()).as_unsigned() }
}
#[doc = "Saturating extract narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqxtnb))]
pub fn svqxtnb_u64(op: svuint64_t) -> svuint32_t {
    // Unsigned saturating narrow (bottom lanes). LLVM's signature is in
    // signed vector types, so reinterpret on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv2i64")]
        fn _svqxtnb_u64(op: svint64_t) -> svint32_t;
    }
    // SAFETY: direct call into the matching LLVM SVE intrinsic.
    unsafe { _svqxtnb_u64(op.as_signed()).as_unsigned() }
}
#[doc = "Saturating extract narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqxtnt))]
pub fn svqxtnt_s16(even: svint8_t, op: svint16_t) -> svint8_t {
    // Saturating narrow into the odd-numbered (top) lanes; `even` supplies
    // the already-populated even lanes of the result (see the doc link).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv8i16")]
        fn _svqxtnt_s16(even: svint8_t, op: svint16_t) -> svint8_t;
    }
    // SAFETY: direct call into the matching LLVM SVE intrinsic.
    unsafe { _svqxtnt_s16(even, op) }
}
#[doc = "Saturating extract narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqxtnt))]
pub fn svqxtnt_s32(even: svint16_t, op: svint32_t) -> svint16_t {
    // Saturating narrow into the odd-numbered (top) lanes; `even` supplies
    // the already-populated even lanes of the result (see the doc link).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv4i32")]
        fn _svqxtnt_s32(even: svint16_t, op: svint32_t) -> svint16_t;
    }
    // SAFETY: direct call into the matching LLVM SVE intrinsic.
    unsafe { _svqxtnt_s32(even, op) }
}
#[doc = "Saturating extract narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqxtnt))]
pub fn svqxtnt_s64(even: svint32_t, op: svint64_t) -> svint32_t {
    // Saturating narrow into the odd-numbered (top) lanes; `even` supplies
    // the already-populated even lanes of the result (see the doc link).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv2i64")]
        fn _svqxtnt_s64(even: svint32_t, op: svint64_t) -> svint32_t;
    }
    // SAFETY: direct call into the matching LLVM SVE intrinsic.
    unsafe { _svqxtnt_s64(even, op) }
}
#[doc = "Saturating extract narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqxtnt))]
pub fn svqxtnt_u16(even: svuint8_t, op: svuint16_t) -> svuint8_t {
    // Unsigned saturating narrow into the odd (top) lanes; `even` supplies
    // the even lanes. Reinterpret around LLVM's signed signature.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv8i16")]
        fn _svqxtnt_u16(even: svint8_t, op: svint16_t) -> svint8_t;
    }
    // SAFETY: direct call into the matching LLVM SVE intrinsic.
    unsafe { _svqxtnt_u16(even.as_signed(), op.as_signed()).as_unsigned() }
}
#[doc = "Saturating extract narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqxtnt))]
pub fn svqxtnt_u32(even: svuint16_t, op: svuint32_t) -> svuint16_t {
    // Unsigned saturating narrow into the odd (top) lanes; `even` supplies
    // the even lanes. Reinterpret around LLVM's signed signature.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv4i32")]
        fn _svqxtnt_u32(even: svint16_t, op: svint32_t) -> svint16_t;
    }
    // SAFETY: direct call into the matching LLVM SVE intrinsic.
    unsafe { _svqxtnt_u32(even.as_signed(), op.as_signed()).as_unsigned() }
}
#[doc = "Saturating extract narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqxtnt))]
pub fn svqxtnt_u64(even: svuint32_t, op: svuint64_t) -> svuint32_t {
    // Unsigned saturating narrow into the odd (top) lanes; `even` supplies
    // the even lanes. Reinterpret around LLVM's signed signature.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv2i64")]
        fn _svqxtnt_u64(even: svint32_t, op: svint64_t) -> svint32_t;
    }
    // SAFETY: direct call into the matching LLVM SVE intrinsic.
    unsafe { _svqxtnt_u64(even.as_signed(), op.as_signed()).as_unsigned() }
}
#[doc = "Saturating extract unsigned narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqxtunb))]
pub fn svqxtunb_s16(op: svint16_t) -> svuint8_t {
    // Signed input, unsigned saturated output; the FFI declaration uses a
    // signed return type, reinterpreted on the way out.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqxtunb.nxv8i16"
        )]
        fn _svqxtunb_s16(op: svint16_t) -> svint8_t;
    }
    // SAFETY: signature matches the LLVM intrinsic; SVE2 is guaranteed by
    // #[target_feature]. `as_unsigned` is a bit-preserving reinterpretation.
    unsafe { _svqxtunb_s16(op).as_unsigned() }
}
#[doc = "Saturating extract unsigned narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqxtunb))]
pub fn svqxtunb_s32(op: svint32_t) -> svuint16_t {
    // Signed input, unsigned saturated output; the FFI declaration uses a
    // signed return type, reinterpreted on the way out.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqxtunb.nxv4i32"
        )]
        fn _svqxtunb_s32(op: svint32_t) -> svint16_t;
    }
    // SAFETY: signature matches the LLVM intrinsic; SVE2 is guaranteed by
    // #[target_feature]. `as_unsigned` is a bit-preserving reinterpretation.
    unsafe { _svqxtunb_s32(op).as_unsigned() }
}
#[doc = "Saturating extract unsigned narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqxtunb))]
pub fn svqxtunb_s64(op: svint64_t) -> svuint32_t {
    // Signed input, unsigned saturated output; the FFI declaration uses a
    // signed return type, reinterpreted on the way out.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqxtunb.nxv2i64"
        )]
        fn _svqxtunb_s64(op: svint64_t) -> svint32_t;
    }
    // SAFETY: signature matches the LLVM intrinsic; SVE2 is guaranteed by
    // #[target_feature]. `as_unsigned` is a bit-preserving reinterpretation.
    unsafe { _svqxtunb_s64(op).as_unsigned() }
}
#[doc = "Saturating extract unsigned narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqxtunt))]
pub fn svqxtunt_s16(even: svuint8_t, op: svint16_t) -> svuint8_t {
    // `even` carries the previously-written even lanes; only it needs the
    // unsigned<->signed reinterpretation for the FFI declaration.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqxtunt.nxv8i16"
        )]
        fn _svqxtunt_s16(even: svint8_t, op: svint16_t) -> svint8_t;
    }
    // SAFETY: signature matches the LLVM intrinsic; SVE2 is guaranteed by
    // #[target_feature]. `as_signed`/`as_unsigned` are bit-preserving.
    unsafe { _svqxtunt_s16(even.as_signed(), op).as_unsigned() }
}
#[doc = "Saturating extract unsigned narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqxtunt))]
pub fn svqxtunt_s32(even: svuint16_t, op: svint32_t) -> svuint16_t {
    // `even` carries the previously-written even lanes; only it needs the
    // unsigned<->signed reinterpretation for the FFI declaration.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqxtunt.nxv4i32"
        )]
        fn _svqxtunt_s32(even: svint16_t, op: svint32_t) -> svint16_t;
    }
    // SAFETY: signature matches the LLVM intrinsic; SVE2 is guaranteed by
    // #[target_feature]. `as_signed`/`as_unsigned` are bit-preserving.
    unsafe { _svqxtunt_s32(even.as_signed(), op).as_unsigned() }
}
#[doc = "Saturating extract unsigned narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqxtunt))]
pub fn svqxtunt_s64(even: svuint32_t, op: svint64_t) -> svuint32_t {
    // `even` carries the previously-written even lanes; only it needs the
    // unsigned<->signed reinterpretation for the FFI declaration.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqxtunt.nxv2i64"
        )]
        fn _svqxtunt_s64(even: svint32_t, op: svint64_t) -> svint32_t;
    }
    // SAFETY: signature matches the LLVM intrinsic; SVE2 is guaranteed by
    // #[target_feature]. `as_signed`/`as_unsigned` are bit-preserving.
    unsafe { _svqxtunt_s64(even.as_signed(), op).as_unsigned() }
}
#[doc = "Rounding add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnb))]
pub fn svraddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t {
    // Declaration of the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.raddhnb.nxv8i16"
        )]
        fn _svraddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t;
    }
    // SAFETY: signature matches the LLVM intrinsic and the required SVE2
    // target feature is enabled by #[target_feature] above.
    unsafe { _svraddhnb_s16(op1, op2) }
}
#[doc = "Rounding add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnb))]
pub fn svraddhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svraddhnb_s16(op1, svdup_n_s16(op2))
}
#[doc = "Rounding add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnb))]
pub fn svraddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t {
    // Declaration of the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.raddhnb.nxv4i32"
        )]
        fn _svraddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t;
    }
    // SAFETY: signature matches the LLVM intrinsic and the required SVE2
    // target feature is enabled by #[target_feature] above.
    unsafe { _svraddhnb_s32(op1, op2) }
}
#[doc = "Rounding add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnb))]
pub fn svraddhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svraddhnb_s32(op1, svdup_n_s32(op2))
}
#[doc = "Rounding add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnb))]
pub fn svraddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t {
    // Declaration of the LLVM intrinsic; the "unadjusted" ABI passes
    // scalable vectors through without Rust's usual ABI adjustment.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.raddhnb.nxv2i64"
        )]
        fn _svraddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t;
    }
    // SAFETY: signature matches the LLVM intrinsic and the required SVE2
    // target feature is enabled by #[target_feature] above.
    unsafe { _svraddhnb_s64(op1, op2) }
}
#[doc = "Rounding add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnb))]
pub fn svraddhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svraddhnb_s64(op1, svdup_n_s64(op2))
}
#[doc = "Rounding add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnb))]
pub fn svraddhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t {
    // SAFETY: RADDHNB has no signedness variant (same assert_instr as the
    // signed form), so reinterpret to the signed wrapper and back; the
    // conversions are bit-preserving.
    unsafe { svraddhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Rounding add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnb))]
pub fn svraddhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svraddhnb_u16(op1, svdup_n_u16(op2))
}
#[doc = "Rounding add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnb))]
pub fn svraddhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t {
    // SAFETY: RADDHNB has no signedness variant (same assert_instr as the
    // signed form), so reinterpret to the signed wrapper and back; the
    // conversions are bit-preserving.
    unsafe { svraddhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Rounding add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnb))]
pub fn svraddhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svraddhnb_u32(op1, svdup_n_u32(op2))
}
#[doc = "Rounding add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnb))]
pub fn svraddhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t {
    // SAFETY: RADDHNB has no signedness variant (same assert_instr as the
    // signed form), so reinterpret to the signed wrapper and back; the
    // conversions are bit-preserving.
    unsafe { svraddhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Rounding add narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnb))]
pub fn svraddhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svraddhnb_u64(op1, svdup_n_u64(op2))
}
#[doc = "Rounding add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnt))]
pub fn svraddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t {
    // Top form: `even` supplies the even lanes of the result; the narrowed
    // values land in the odd lanes. Declared via the "unadjusted" FFI ABI.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.raddhnt.nxv8i16"
        )]
        fn _svraddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t;
    }
    // SAFETY: signature matches the LLVM intrinsic and the required SVE2
    // target feature is enabled by #[target_feature] above.
    unsafe { _svraddhnt_s16(even, op1, op2) }
}
#[doc = "Rounding add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnt))]
pub fn svraddhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svraddhnt_s16(even, op1, svdup_n_s16(op2))
}
#[doc = "Rounding add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnt))]
pub fn svraddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t {
    // Top form: `even` supplies the even lanes of the result; the narrowed
    // values land in the odd lanes. Declared via the "unadjusted" FFI ABI.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.raddhnt.nxv4i32"
        )]
        fn _svraddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t;
    }
    // SAFETY: signature matches the LLVM intrinsic and the required SVE2
    // target feature is enabled by #[target_feature] above.
    unsafe { _svraddhnt_s32(even, op1, op2) }
}
#[doc = "Rounding add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnt))]
pub fn svraddhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svraddhnt_s32(even, op1, svdup_n_s32(op2))
}
#[doc = "Rounding add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnt))]
pub fn svraddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t {
    // Top form: `even` supplies the even lanes of the result; the narrowed
    // values land in the odd lanes. Declared via the "unadjusted" FFI ABI.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.raddhnt.nxv2i64"
        )]
        fn _svraddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t;
    }
    // SAFETY: signature matches the LLVM intrinsic and the required SVE2
    // target feature is enabled by #[target_feature] above.
    unsafe { _svraddhnt_s64(even, op1, op2) }
}
#[doc = "Rounding add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnt))]
pub fn svraddhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svraddhnt_s64(even, op1, svdup_n_s64(op2))
}
#[doc = "Rounding add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnt))]
pub fn svraddhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t {
    // SAFETY: RADDHNT has no signedness variant (same assert_instr as the
    // signed form); the sign casts are bit-preserving reinterpretations.
    unsafe { svraddhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Rounding add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnt))]
pub fn svraddhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svraddhnt_u16(even, op1, svdup_n_u16(op2))
}
#[doc = "Rounding add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnt))]
pub fn svraddhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t {
    // SAFETY: RADDHNT has no signedness variant (same assert_instr as the
    // signed form); the sign casts are bit-preserving reinterpretations.
    unsafe { svraddhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Rounding add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnt))]
pub fn svraddhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svraddhnt_u32(even, op1, svdup_n_u32(op2))
}
#[doc = "Rounding add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnt))]
pub fn svraddhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t {
    // SAFETY: RADDHNT has no signedness variant (same assert_instr as the
    // signed form); the sign casts are bit-preserving reinterpretations.
    unsafe { svraddhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Rounding add narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(raddhnt))]
pub fn svraddhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svraddhnt_u64(even, op1, svdup_n_u64(op2))
}
#[doc = "Bitwise rotate left by 1 and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrax1[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-sha3")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rax1))]
pub fn svrax1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // RAX1 exists only at 64-bit element width, so the intrinsic name has no
    // element-type (nxv*) suffix. Requires the extra sve2-sha3 feature.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rax1")]
        fn _svrax1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: signature matches the LLVM intrinsic; sve2 and sve2-sha3 are
    // enabled by #[target_feature] above.
    unsafe { _svrax1_s64(op1, op2) }
}
#[doc = "Bitwise rotate left by 1 and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrax1[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-sha3")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rax1))]
pub fn svrax1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // SAFETY: rotate-and-XOR is a pure bitwise operation, so signedness is
    // irrelevant (same rax1 instruction); the casts are bit-preserving.
    unsafe { svrax1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Reciprocal estimate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urecpe))]
pub fn svrecpe_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // Merging form: inactive lanes take their value from `inactive`. The LLVM
    // intrinsic wants a 32-bit-element predicate (svbool4_t), hence sve_into.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urecpe.nxv4i32")]
        fn _svrecpe_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    // SAFETY: signature matches the LLVM intrinsic; SVE2 is enabled by
    // #[target_feature]. Predicate and sign conversions are lossless.
    unsafe { _svrecpe_u32_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Reciprocal estimate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urecpe))]
pub fn svrecpe_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // _x ("don't care") form: implemented as the merging form with `op`
    // itself as the inactive value, so inactive lanes simply keep `op`.
    svrecpe_u32_m(op, pg, op)
}
#[doc = "Reciprocal estimate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urecpe))]
pub fn svrecpe_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // _z (zeroing) form: inactive lanes become zero by merging with a
    // zero-splat inactive vector.
    svrecpe_u32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // 8-bit elements use the full-width predicate (svbool_t) directly; no
    // predicate conversion is needed at nxv16i8.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv16i8")]
        fn _svrhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: signature matches the LLVM intrinsic and SVE2 is enabled by
    // #[target_feature] above.
    unsafe { _svrhadd_s8_m(pg, op1, op2) }
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svrhadd_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // _x ("don't care") form: delegates to the merging form, so inactive
    // lanes keep op1.
    svrhadd_s8_m(pg, op1, op2)
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svrhadd_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // _z (zeroing) form: zero the inactive lanes of op1 first (svsel against
    // a zero splat), then apply the merging form.
    svrhadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svrhadd_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // The nxv8i16 intrinsic takes a 16-bit-element predicate (svbool8_t);
    // sve_into converts from the generic svbool_t.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv8i16")]
        fn _svrhadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: signature matches the LLVM intrinsic and SVE2 is enabled by
    // #[target_feature] above; the predicate conversion is lossless.
    unsafe { _svrhadd_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svrhadd_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // _x ("don't care") form: delegates to the merging form, so inactive
    // lanes keep op1.
    svrhadd_s16_m(pg, op1, op2)
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svrhadd_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // _z (zeroing) form: zero the inactive lanes of op1 first (svsel against
    // a zero splat), then apply the merging form.
    svrhadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svrhadd_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // The nxv4i32 intrinsic takes a 32-bit-element predicate (svbool4_t);
    // sve_into converts from the generic svbool_t.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv4i32")]
        fn _svrhadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: signature matches the LLVM intrinsic and SVE2 is enabled by
    // #[target_feature] above; the predicate conversion is lossless.
    unsafe { _svrhadd_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svrhadd_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // _x ("don't care") form: delegates to the merging form, so inactive
    // lanes keep op1.
    svrhadd_s32_m(pg, op1, op2)
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svrhadd_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // _z (zeroing) form: zero the inactive lanes of op1 first (svsel against
    // a zero splat), then apply the merging form.
    svrhadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svrhadd_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // The nxv2i64 intrinsic takes a 64-bit-element predicate (svbool2_t);
    // sve_into converts from the generic svbool_t.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv2i64")]
        fn _svrhadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: signature matches the LLVM intrinsic and SVE2 is enabled by
    // #[target_feature] above; the predicate conversion is lossless.
    unsafe { _svrhadd_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svrhadd_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // _x ("don't care") form: delegates to the merging form, so inactive
    // lanes keep op1.
    svrhadd_s64_m(pg, op1, op2)
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srhadd))]
pub fn svrhadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // `_n_` form: splat the scalar across a vector and defer to the vector form.
    svrhadd_s64_x(pg, op1, svdup_n_s64(op2))
}
17010#[doc = "Rounding halving add"]
17011#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_z)"]
17012#[inline(always)]
17013#[target_feature(enable = "sve,sve2")]
17014#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17015#[cfg_attr(test, assert_instr(srhadd))]
17016pub fn svrhadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
17017 svrhadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
17018}
17019#[doc = "Rounding halving add"]
17020#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_z)"]
17021#[inline(always)]
17022#[target_feature(enable = "sve,sve2")]
17023#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17024#[cfg_attr(test, assert_instr(srhadd))]
17025pub fn svrhadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
17026 svrhadd_s64_z(pg, op1, svdup_n_s64(op2))
17027}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        // 8-bit lanes (nxv16i8) use the full-width predicate directly — no
        // predicate narrowing needed. The intrinsic is declared over signed
        // vectors; the casts below only reinterpret bits.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv16i8")]
        fn _svrhadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: sve2 is enabled via #[target_feature]. Merging form: inactive
    // lanes keep their value from op1.
    unsafe { _svrhadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Splat the scalar operand, then defer to the vector merging form.
    svrhadd_u8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // "Don't care" (_x) predication: the merging form is a valid implementation.
    svrhadd_u8_m(pg, op1, op2)
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Splat the scalar operand, then defer to the vector form.
    svrhadd_u8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Zeroing (_z) predication: clear inactive lanes of op1, then merge.
    svrhadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Splat the scalar operand, then defer to the zeroing vector form.
    svrhadd_u8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        // 16-bit lanes (nxv8i16) take a 16-bit-element predicate (svbool8_t).
        // The intrinsic is declared over signed vectors; the casts below only
        // reinterpret bits.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv8i16")]
        fn _svrhadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: sve2 is enabled via #[target_feature]; `sve_into` narrows the
    // predicate. Merging form: inactive lanes keep their value from op1.
    unsafe { _svrhadd_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Splat the scalar operand, then defer to the vector merging form.
    svrhadd_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // "Don't care" (_x) predication: the merging form is a valid implementation.
    svrhadd_u16_m(pg, op1, op2)
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Splat the scalar operand, then defer to the vector form.
    svrhadd_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Zeroing (_z) predication: clear inactive lanes of op1, then merge.
    svrhadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Splat the scalar operand, then defer to the zeroing vector form.
    svrhadd_u16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        // 32-bit lanes (nxv4i32) take a 32-bit-element predicate (svbool4_t).
        // The intrinsic is declared over signed vectors; the casts below only
        // reinterpret bits.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv4i32")]
        fn _svrhadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: sve2 is enabled via #[target_feature]; `sve_into` narrows the
    // predicate. Merging form: inactive lanes keep their value from op1.
    unsafe { _svrhadd_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Splat the scalar operand, then defer to the vector merging form.
    svrhadd_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // "Don't care" (_x) predication: the merging form is a valid implementation.
    svrhadd_u32_m(pg, op1, op2)
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Splat the scalar operand, then defer to the vector form.
    svrhadd_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Zeroing (_z) predication: clear inactive lanes of op1, then merge.
    svrhadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Splat the scalar operand, then defer to the zeroing vector form.
    svrhadd_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        // 64-bit lanes (nxv2i64) take a 64-bit-element predicate (svbool2_t).
        // The intrinsic is declared over signed vectors; the casts below only
        // reinterpret bits.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv2i64")]
        fn _svrhadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: sve2 is enabled via #[target_feature]; `sve_into` narrows the
    // predicate. Merging form: inactive lanes keep their value from op1.
    unsafe { _svrhadd_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Splat the scalar operand, then defer to the vector merging form.
    svrhadd_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // "Don't care" (_x) predication: the merging form is a valid implementation.
    svrhadd_u64_m(pg, op1, op2)
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Splat the scalar operand, then defer to the vector form.
    svrhadd_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Zeroing (_z) predication: clear inactive lanes of op1, then merge.
    svrhadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Rounding halving add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urhadd))]
pub fn svrhadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Splat the scalar operand, then defer to the zeroing vector form.
    svrhadd_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        // 8-bit lanes (nxv16i8) use the full-width predicate directly.
        // op2 is a per-lane signed shift count (per the Arm docs, a negative
        // count shifts right).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv16i8")]
        fn _svrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: sve2 is enabled via #[target_feature]. Merging form: inactive
    // lanes keep their value from op1.
    unsafe { _svrshl_s8_m(pg, op1, op2) }
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Splat the scalar shift count, then defer to the vector merging form.
    svrshl_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // "Don't care" (_x) predication: the merging form is a valid implementation.
    svrshl_s8_m(pg, op1, op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Splat the scalar shift count, then defer to the vector form.
    svrshl_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Zeroing (_z) predication: clear inactive lanes of op1, then merge.
    svrshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Splat the scalar shift count, then defer to the zeroing vector form.
    svrshl_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        // 16-bit lanes (nxv8i16) take a 16-bit-element predicate (svbool8_t).
        // op2 is a per-lane signed shift count (per the Arm docs, a negative
        // count shifts right).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv8i16")]
        fn _svrshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: sve2 is enabled via #[target_feature]; `sve_into` narrows the
    // predicate. Merging form: inactive lanes keep their value from op1.
    unsafe { _svrshl_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Splat the scalar shift count, then defer to the vector merging form.
    svrshl_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // "Don't care" (_x) predication: the merging form is a valid implementation.
    svrshl_s16_m(pg, op1, op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Splat the scalar shift count, then defer to the vector form.
    svrshl_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Zeroing (_z) predication: clear inactive lanes of op1, then merge.
    svrshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Splat the scalar shift count, then defer to the zeroing vector form.
    svrshl_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        // 32-bit lanes (nxv4i32) take a 32-bit-element predicate (svbool4_t).
        // op2 is a per-lane signed shift count (per the Arm docs, a negative
        // count shifts right).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv4i32")]
        fn _svrshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: sve2 is enabled via #[target_feature]; `sve_into` narrows the
    // predicate. Merging form: inactive lanes keep their value from op1.
    unsafe { _svrshl_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Splat the scalar shift count, then defer to the vector merging form.
    svrshl_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // "Don't care" (_x) predication: the merging form is a valid implementation.
    svrshl_s32_m(pg, op1, op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Splat the scalar shift count, then defer to the vector form.
    svrshl_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Zeroing (_z) predication: clear inactive lanes of op1, then merge.
    svrshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Splat the scalar shift count, then defer to the zeroing vector form.
    svrshl_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        // 64-bit lanes (nxv2i64) take a 64-bit-element predicate (svbool2_t).
        // op2 is a per-lane signed shift count (per the Arm docs, a negative
        // count shifts right).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv2i64")]
        fn _svrshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: sve2 is enabled via #[target_feature]; `sve_into` narrows the
    // predicate. Merging form: inactive lanes keep their value from op1.
    unsafe { _svrshl_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Splat the scalar shift count, then defer to the vector merging form.
    svrshl_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // "Don't care" (_x) predication: the merging form is a valid implementation.
    svrshl_s64_m(pg, op1, op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Splat the scalar shift count, then defer to the vector form.
    svrshl_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Zeroing (_z) predication: clear inactive lanes of op1, then merge.
    svrshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshl))]
pub fn svrshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Splat the scalar shift count, then defer to the zeroing vector form.
    svrshl_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        // 8-bit lanes (nxv16i8) use the full-width predicate directly.
        // op2 stays signed even for the unsigned variant: it is a per-lane
        // shift count (per the Arm docs, a negative count shifts right).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv16i8")]
        fn _svrshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: sve2 is enabled via #[target_feature]; the sign casts on op1 and
    // the result only reinterpret bits. Merging form: inactive lanes keep op1.
    unsafe { _svrshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    // Splat the scalar shift count, then defer to the vector merging form.
    svrshl_u8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    // "Don't care" (_x) predication: the merging form is a valid implementation.
    svrshl_u8_m(pg, op1, op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    // Splat the scalar shift count, then defer to the vector form.
    svrshl_u8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    // Zeroing (_z) predication: clear inactive lanes of op1, then merge.
    svrshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    // Splat the scalar shift count, then defer to the zeroing vector form.
    svrshl_u8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        // 16-bit lanes (nxv8i16) take a 16-bit-element predicate (svbool8_t).
        // op2 stays signed: it is a per-lane shift count (per the Arm docs, a
        // negative count shifts right).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv8i16")]
        fn _svrshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: sve2 is enabled via #[target_feature]; `sve_into` narrows the
    // predicate; the sign casts only reinterpret bits. Merging form.
    unsafe { _svrshl_u16_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    // Splat the scalar shift count, then defer to the vector merging form.
    svrshl_u16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    // "Don't care" (_x) predication: the merging form is a valid implementation.
    svrshl_u16_m(pg, op1, op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    // Splat the scalar shift count, then defer to the vector form.
    svrshl_u16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    // Zeroing (_z) predication: clear inactive lanes of op1, then merge.
    svrshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    // Splat the scalar shift count, then defer to the zeroing vector form.
    svrshl_u16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    // Merging (`_m`) variant, bound directly to the LLVM SVE intrinsic. The
    // intrinsic is declared on signed element types, so the unsigned operand
    // and result are bitcast with `as_signed`/`as_unsigned`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv4i32")]
        fn _svrshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic; `sve_into` converts the
    // generic predicate to the 4-lane predicate type the intrinsic expects.
    unsafe { _svrshl_u32_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    // Scalar-operand (`_n`) variant: splat the signed shift amount across all
    // lanes and reuse the vector form.
    svrshl_u32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    // "Don't care" (`_x`) variant: inactive lanes are unspecified, so
    // delegating to the merging form is a valid implementation choice.
    svrshl_u32_m(pg, op1, op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    // Scalar-operand (`_n`) variant: splat the signed shift amount and reuse
    // the "don't care" vector form.
    svrshl_u32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    // Zeroing (`_z`) variant: force inactive lanes of `op1` to zero with
    // `svsel` before applying the merging operation.
    svrshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    // Scalar-operand (`_n`) variant: splat the signed shift amount and reuse
    // the zeroing vector form.
    svrshl_u32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
    // Merging (`_m`) variant, bound directly to the LLVM SVE intrinsic. The
    // intrinsic is declared on signed element types, so the unsigned operand
    // and result are bitcast with `as_signed`/`as_unsigned`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv2i64")]
        fn _svrshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic; `sve_into` converts the
    // generic predicate to the 2-lane predicate type the intrinsic expects.
    unsafe { _svrshl_u64_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
    // Scalar-operand (`_n`) variant: splat the signed shift amount across all
    // lanes and reuse the vector form.
    svrshl_u64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
    // "Don't care" (`_x`) variant: inactive lanes are unspecified, so
    // delegating to the merging form is a valid implementation choice.
    svrshl_u64_m(pg, op1, op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
    // Scalar-operand (`_n`) variant: splat the signed shift amount and reuse
    // the "don't care" vector form.
    svrshl_u64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
    // Zeroing (`_z`) variant: force inactive lanes of `op1` to zero with
    // `svsel` before applying the merging operation.
    svrshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshl))]
pub fn svrshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
    // Scalar-operand (`_n`) variant: splat the signed shift amount and reuse
    // the zeroing vector form.
    svrshl_u64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
pub fn svrshr_n_s8_m<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
    // The shift amount is a compile-time immediate, restricted to the
    // architectural range 1..=8 for byte elements.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv16i8")]
        fn _svrshr_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic; byte elements use the
    // full-width predicate, so `pg` is passed without conversion.
    unsafe { _svrshr_n_s8_m(pg, op1, IMM2) }
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
pub fn svrshr_n_s8_x<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
    // "Don't care" (`_x`) variant: inactive lanes are unspecified, so
    // delegating to the merging form is a valid implementation choice.
    svrshr_n_s8_m::<IMM2>(pg, op1)
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
pub fn svrshr_n_s8_z<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
    // Zeroing (`_z`) variant: force inactive lanes of `op1` to zero with
    // `svsel` before applying the merging operation.
    svrshr_n_s8_m::<IMM2>(pg, svsel_s8(pg, op1, svdup_n_s8(0)))
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
pub fn svrshr_n_s16_m<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
    // The shift amount is a compile-time immediate, restricted to the
    // architectural range 1..=16 for halfword elements.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv8i16")]
        fn _svrshr_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic; `sve_into` converts the
    // generic predicate to the 8-lane predicate type the intrinsic expects.
    unsafe { _svrshr_n_s16_m(pg.sve_into(), op1, IMM2) }
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
pub fn svrshr_n_s16_x<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
    // "Don't care" (`_x`) variant: inactive lanes are unspecified, so
    // delegating to the merging form is a valid implementation choice.
    svrshr_n_s16_m::<IMM2>(pg, op1)
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
pub fn svrshr_n_s16_z<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
    // Zeroing (`_z`) variant: force inactive lanes of `op1` to zero with
    // `svsel` before applying the merging operation.
    svrshr_n_s16_m::<IMM2>(pg, svsel_s16(pg, op1, svdup_n_s16(0)))
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
pub fn svrshr_n_s32_m<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
    // The shift amount is a compile-time immediate, restricted to the
    // architectural range 1..=32 for word elements.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv4i32")]
        fn _svrshr_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic; `sve_into` converts the
    // generic predicate to the 4-lane predicate type the intrinsic expects.
    unsafe { _svrshr_n_s32_m(pg.sve_into(), op1, IMM2) }
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
pub fn svrshr_n_s32_x<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
    // "Don't care" (`_x`) variant: inactive lanes are unspecified, so
    // delegating to the merging form is a valid implementation choice.
    svrshr_n_s32_m::<IMM2>(pg, op1)
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
pub fn svrshr_n_s32_z<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
    // Zeroing (`_z`) variant: force inactive lanes of `op1` to zero with
    // `svsel` before applying the merging operation.
    svrshr_n_s32_m::<IMM2>(pg, svsel_s32(pg, op1, svdup_n_s32(0)))
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
pub fn svrshr_n_s64_m<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
    // The shift amount is a compile-time immediate, restricted to the
    // architectural range 1..=64 for doubleword elements.
    static_assert_range!(IMM2, 1..=64);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv2i64")]
        fn _svrshr_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic; `sve_into` converts the
    // generic predicate to the 2-lane predicate type the intrinsic expects.
    unsafe { _svrshr_n_s64_m(pg.sve_into(), op1, IMM2) }
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
pub fn svrshr_n_s64_x<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
    // "Don't care" (`_x`) variant: inactive lanes are unspecified, so
    // delegating to the merging form is a valid implementation choice.
    svrshr_n_s64_m::<IMM2>(pg, op1)
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
pub fn svrshr_n_s64_z<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
    // Zeroing (`_z`) variant: force inactive lanes of `op1` to zero with
    // `svsel` before applying the merging operation.
    svrshr_n_s64_m::<IMM2>(pg, svsel_s64(pg, op1, svdup_n_s64(0)))
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
pub fn svrshr_n_u8_m<const IMM2: i32>(pg: svbool_t, op1: svuint8_t) -> svuint8_t {
    // The shift amount is a compile-time immediate, restricted to the
    // architectural range 1..=8 for byte elements. The LLVM intrinsic is
    // declared on signed vectors, hence the `as_signed`/`as_unsigned` bitcasts.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv16i8")]
        fn _svrshr_n_u8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic; byte elements use the
    // full-width predicate, so `pg` is passed without conversion.
    unsafe { _svrshr_n_u8_m(pg, op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
pub fn svrshr_n_u8_x<const IMM2: i32>(pg: svbool_t, op1: svuint8_t) -> svuint8_t {
    // "Don't care" (`_x`) variant: inactive lanes are unspecified, so
    // delegating to the merging form is a valid implementation choice.
    svrshr_n_u8_m::<IMM2>(pg, op1)
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
pub fn svrshr_n_u8_z<const IMM2: i32>(pg: svbool_t, op1: svuint8_t) -> svuint8_t {
    // Zeroing (`_z`) variant: force inactive lanes of `op1` to zero with
    // `svsel` before applying the merging operation.
    svrshr_n_u8_m::<IMM2>(pg, svsel_u8(pg, op1, svdup_n_u8(0)))
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
pub fn svrshr_n_u16_m<const IMM2: i32>(pg: svbool_t, op1: svuint16_t) -> svuint16_t {
    // The shift amount is a compile-time immediate, restricted to the
    // architectural range 1..=16 for halfword elements. The LLVM intrinsic is
    // declared on signed vectors, hence the `as_signed`/`as_unsigned` bitcasts.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv8i16")]
        fn _svrshr_n_u16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic; `sve_into` converts the
    // generic predicate to the 8-lane predicate type the intrinsic expects.
    unsafe { _svrshr_n_u16_m(pg.sve_into(), op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
pub fn svrshr_n_u16_x<const IMM2: i32>(pg: svbool_t, op1: svuint16_t) -> svuint16_t {
    // "Don't care" (`_x`) variant: inactive lanes are unspecified, so
    // delegating to the merging form is a valid implementation choice.
    svrshr_n_u16_m::<IMM2>(pg, op1)
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
pub fn svrshr_n_u16_z<const IMM2: i32>(pg: svbool_t, op1: svuint16_t) -> svuint16_t {
    // Zeroing (`_z`) variant: force inactive lanes of `op1` to zero with
    // `svsel` before applying the merging operation.
    svrshr_n_u16_m::<IMM2>(pg, svsel_u16(pg, op1, svdup_n_u16(0)))
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
pub fn svrshr_n_u32_m<const IMM2: i32>(pg: svbool_t, op1: svuint32_t) -> svuint32_t {
    // The shift amount is a compile-time immediate, restricted to the
    // architectural range 1..=32 for word elements. The LLVM intrinsic is
    // declared on signed vectors, hence the `as_signed`/`as_unsigned` bitcasts.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv4i32")]
        fn _svrshr_n_u32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic; `sve_into` converts the
    // generic predicate to the 4-lane predicate type the intrinsic expects.
    unsafe { _svrshr_n_u32_m(pg.sve_into(), op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
pub fn svrshr_n_u32_x<const IMM2: i32>(pg: svbool_t, op1: svuint32_t) -> svuint32_t {
    // "Don't care" (`_x`) variant: inactive lanes are unspecified, so
    // delegating to the merging form is a valid implementation choice.
    svrshr_n_u32_m::<IMM2>(pg, op1)
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
pub fn svrshr_n_u32_z<const IMM2: i32>(pg: svbool_t, op1: svuint32_t) -> svuint32_t {
    // Zeroing (`_z`) variant: force inactive lanes of `op1` to zero with
    // `svsel` before applying the merging operation.
    svrshr_n_u32_m::<IMM2>(pg, svsel_u32(pg, op1, svdup_n_u32(0)))
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
pub fn svrshr_n_u64_m<const IMM2: i32>(pg: svbool_t, op1: svuint64_t) -> svuint64_t {
    // The shift amount is a compile-time immediate, restricted to the
    // architectural range 1..=64 for doubleword elements. The LLVM intrinsic is
    // declared on signed vectors, hence the `as_signed`/`as_unsigned` bitcasts.
    static_assert_range!(IMM2, 1..=64);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv2i64")]
        fn _svrshr_n_u64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic; `sve_into` converts the
    // generic predicate to the 2-lane predicate type the intrinsic expects.
    unsafe { _svrshr_n_u64_m(pg.sve_into(), op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
pub fn svrshr_n_u64_x<const IMM2: i32>(pg: svbool_t, op1: svuint64_t) -> svuint64_t {
    // "Don't care" (`_x`) variant: inactive lanes are unspecified, so
    // delegating to the merging form is a valid implementation choice.
    svrshr_n_u64_m::<IMM2>(pg, op1)
}
#[doc = "Rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
pub fn svrshr_n_u64_z<const IMM2: i32>(pg: svbool_t, op1: svuint64_t) -> svuint64_t {
    // Zeroing (`_z`) variant: force inactive lanes of `op1` to zero with
    // `svsel` before applying the merging operation.
    svrshr_n_u64_m::<IMM2>(pg, svsel_u64(pg, op1, svdup_n_u64(0)))
}
#[doc = "Rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
pub fn svrshrnb_n_s16<const IMM2: i32>(op1: svint16_t) -> svint8_t {
    // Narrowing shift: the immediate is limited to 1..=8 (the width of the
    // narrowed element). Unpredicated; results land in the bottom half-width
    // lanes per the `_b` (bottom) form.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv8i16")]
        fn _svrshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic with a validated immediate.
    unsafe { _svrshrnb_n_s16(op1, IMM2) }
}
#[doc = "Rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
pub fn svrshrnb_n_s32<const IMM2: i32>(op1: svint32_t) -> svint16_t {
    // Narrowing shift: the immediate is limited to 1..=16 (the width of the
    // narrowed element). Unpredicated; results land in the bottom half-width
    // lanes per the `_b` (bottom) form.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv4i32")]
        fn _svrshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic with a validated immediate.
    unsafe { _svrshrnb_n_s32(op1, IMM2) }
}
#[doc = "Rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
pub fn svrshrnb_n_s64<const IMM2: i32>(op1: svint64_t) -> svint32_t {
    // Narrowing shift: the immediate is limited to 1..=32 (the width of the
    // narrowed element). Unpredicated; results land in the bottom half-width
    // lanes per the `_b` (bottom) form.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv2i64")]
        fn _svrshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic with a validated immediate.
    unsafe { _svrshrnb_n_s64(op1, IMM2) }
}
#[doc = "Rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
pub fn svrshrnb_n_u16<const IMM2: i32>(op1: svuint16_t) -> svuint8_t {
    static_assert_range!(IMM2, 1..=8);
    // SAFETY: bitcast reuse of the signed implementation; the same `rshrnb`
    // instruction is generated for both signednesses (see `assert_instr`).
    unsafe { svrshrnb_n_s16::<IMM2>(op1.as_signed()).as_unsigned() }
}
#[doc = "Rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
pub fn svrshrnb_n_u32<const IMM2: i32>(op1: svuint32_t) -> svuint16_t {
    static_assert_range!(IMM2, 1..=16);
    // SAFETY: bitcast reuse of the signed implementation; the same `rshrnb`
    // instruction is generated for both signednesses (see `assert_instr`).
    unsafe { svrshrnb_n_s32::<IMM2>(op1.as_signed()).as_unsigned() }
}
#[doc = "Rounding shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
pub fn svrshrnb_n_u64<const IMM2: i32>(op1: svuint64_t) -> svuint32_t {
    static_assert_range!(IMM2, 1..=32);
    // SAFETY: bitcast reuse of the signed implementation; the same `rshrnb`
    // instruction is generated for both signednesses (see `assert_instr`).
    unsafe { svrshrnb_n_s64::<IMM2>(op1.as_signed()).as_unsigned() }
}
#[doc = "Rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
pub fn svrshrnt_n_s16<const IMM2: i32>(even: svint8_t, op1: svint16_t) -> svint8_t {
    // Top (`_t`) form of the narrowing shift: `even` supplies the lanes not
    // written by the narrowed results. Immediate limited to 1..=8.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv8i16")]
        fn _svrshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic with a validated immediate.
    unsafe { _svrshrnt_n_s16(even, op1, IMM2) }
}
#[doc = "Rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
pub fn svrshrnt_n_s32<const IMM2: i32>(even: svint16_t, op1: svint32_t) -> svint16_t {
    // Top (`_t`) form of the narrowing shift: `even` supplies the lanes not
    // written by the narrowed results. Immediate limited to 1..=16.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv4i32")]
        fn _svrshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic with a validated immediate.
    unsafe { _svrshrnt_n_s32(even, op1, IMM2) }
}
#[doc = "Rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
pub fn svrshrnt_n_s64<const IMM2: i32>(even: svint32_t, op1: svint64_t) -> svint32_t {
    // Top (`_t`) form of the narrowing shift: `even` supplies the lanes not
    // written by the narrowed results. Immediate limited to 1..=32.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv2i64")]
        fn _svrshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic with a validated immediate.
    unsafe { _svrshrnt_n_s64(even, op1, IMM2) }
}
#[doc = "Rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
pub fn svrshrnt_n_u16<const IMM2: i32>(even: svuint8_t, op1: svuint16_t) -> svuint8_t {
    static_assert_range!(IMM2, 1..=8);
    // SAFETY: bitcast reuse of the signed implementation; the same `rshrnt`
    // instruction is generated for both signednesses (see `assert_instr`).
    unsafe { svrshrnt_n_s16::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
}
#[doc = "Rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
pub fn svrshrnt_n_u32<const IMM2: i32>(even: svuint16_t, op1: svuint32_t) -> svuint16_t {
    static_assert_range!(IMM2, 1..=16);
    // SAFETY: bitcast reuse of the signed implementation; the same `rshrnt`
    // instruction is generated for both signednesses (see `assert_instr`).
    unsafe { svrshrnt_n_s32::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
}
#[doc = "Rounding shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
pub fn svrshrnt_n_u64<const IMM2: i32>(even: svuint32_t, op1: svuint64_t) -> svuint32_t {
    static_assert_range!(IMM2, 1..=32);
    // SAFETY: bitcast reuse of the signed implementation; the same `rshrnt`
    // instruction is generated for both signednesses (see `assert_instr`).
    unsafe { svrshrnt_n_s64::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
}
#[doc = "Reciprocal square root estimate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ursqrte))]
pub fn svrsqrte_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // Merging (`_m`) variant: `inactive` supplies the result for lanes where
    // `pg` is false. The LLVM intrinsic is declared on signed vectors, hence
    // the `as_signed`/`as_unsigned` bitcasts.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ursqrte.nxv4i32"
        )]
        fn _svrsqrte_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic; `sve_into` converts the
    // generic predicate to the 4-lane predicate type the intrinsic expects.
    unsafe { _svrsqrte_u32_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Reciprocal square root estimate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ursqrte))]
pub fn svrsqrte_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // "Don't care" (`_x`) variant: inactive lanes are unspecified, so the
    // merging form is reused with `op` itself as the inactive-lane source.
    svrsqrte_u32_m(op, pg, op)
}
#[doc = "Reciprocal square root estimate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ursqrte))]
pub fn svrsqrte_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // Zeroing (`_z`) variant: a zero vector is used as the inactive-lane
    // source of the merging form.
    svrsqrte_u32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Rounding shift right and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))]
pub fn svrsra_n_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
    // `op2` is rounding-shifted right by the immediate (1..=8) and the result
    // accumulated into `op1`. Unpredicated.
    static_assert_range!(IMM3, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv16i8")]
        fn _svrsra_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic with a validated immediate.
    unsafe { _svrsra_n_s8(op1, op2, IMM3) }
}
#[doc = "Rounding shift right and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))]
pub fn svrsra_n_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // `op2` is rounding-shifted right by the immediate (1..=16) and the result
    // accumulated into `op1`. Unpredicated.
    static_assert_range!(IMM3, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv8i16")]
        fn _svrsra_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic with a validated immediate.
    unsafe { _svrsra_n_s16(op1, op2, IMM3) }
}
#[doc = "Rounding shift right and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))]
pub fn svrsra_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // `op2` is rounding-shifted right by the immediate (1..=32) and the result
    // accumulated into `op1`. Unpredicated.
    static_assert_range!(IMM3, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv4i32")]
        fn _svrsra_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
    }
    // SAFETY: well-typed call to the LLVM intrinsic with a validated immediate.
    unsafe { _svrsra_n_s32(op1, op2, IMM3) }
}
18200#[doc = "Rounding shift right and accumulate"]
18201#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s64])"]
18202#[inline(always)]
18203#[target_feature(enable = "sve,sve2")]
18204#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18205#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))]
18206pub fn svrsra_n_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
18207 static_assert_range!(IMM3, 1..=64);
18208 unsafe extern "unadjusted" {
18209 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv2i64")]
18210 fn _svrsra_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
18211 }
18212 unsafe { _svrsra_n_s64(op1, op2, IMM3) }
18213}
18214#[doc = "Rounding shift right and accumulate"]
18215#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u8])"]
18216#[inline(always)]
18217#[target_feature(enable = "sve,sve2")]
18218#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18219#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))]
18220pub fn svrsra_n_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
18221 static_assert_range!(IMM3, 1..=8);
18222 unsafe extern "unadjusted" {
18223 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv16i8")]
18224 fn _svrsra_n_u8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
18225 }
18226 unsafe { _svrsra_n_u8(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
18227}
18228#[doc = "Rounding shift right and accumulate"]
18229#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u16])"]
18230#[inline(always)]
18231#[target_feature(enable = "sve,sve2")]
18232#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18233#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))]
18234pub fn svrsra_n_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
18235 static_assert_range!(IMM3, 1..=16);
18236 unsafe extern "unadjusted" {
18237 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv8i16")]
18238 fn _svrsra_n_u16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
18239 }
18240 unsafe { _svrsra_n_u16(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
18241}
18242#[doc = "Rounding shift right and accumulate"]
18243#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u32])"]
18244#[inline(always)]
18245#[target_feature(enable = "sve,sve2")]
18246#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18247#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))]
18248pub fn svrsra_n_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
18249 static_assert_range!(IMM3, 1..=32);
18250 unsafe extern "unadjusted" {
18251 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv4i32")]
18252 fn _svrsra_n_u32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
18253 }
18254 unsafe { _svrsra_n_u32(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
18255}
18256#[doc = "Rounding shift right and accumulate"]
18257#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u64])"]
18258#[inline(always)]
18259#[target_feature(enable = "sve,sve2")]
18260#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18261#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))]
18262pub fn svrsra_n_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
18263 static_assert_range!(IMM3, 1..=64);
18264 unsafe extern "unadjusted" {
18265 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv2i64")]
18266 fn _svrsra_n_u64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
18267 }
18268 unsafe { _svrsra_n_u64(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
18269}
#[doc = "Rounding subtract narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnb))]
pub fn svrsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.rsubhnb.nxv8i16"
        )]
        fn _svrsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t;
    }
    // SAFETY: no preconditions beyond the target features enabled on this function.
    unsafe { _svrsubhnb_s16(op1, op2) }
}
#[doc = "Rounding subtract narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnb))]
pub fn svrsubhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t {
    // Splat the scalar across all lanes and defer to the vector form.
    svrsubhnb_s16(op1, svdup_n_s16(op2))
}
#[doc = "Rounding subtract narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnb))]
pub fn svrsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.rsubhnb.nxv4i32"
        )]
        fn _svrsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t;
    }
    // SAFETY: no preconditions beyond the enabled target features.
    unsafe { _svrsubhnb_s32(op1, op2) }
}
#[doc = "Rounding subtract narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnb))]
pub fn svrsubhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t {
    // Splat the scalar across all lanes and defer to the vector form.
    svrsubhnb_s32(op1, svdup_n_s32(op2))
}
#[doc = "Rounding subtract narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnb))]
pub fn svrsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.rsubhnb.nxv2i64"
        )]
        fn _svrsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t;
    }
    // SAFETY: no preconditions beyond the enabled target features.
    unsafe { _svrsubhnb_s64(op1, op2) }
}
#[doc = "Rounding subtract narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnb))]
pub fn svrsubhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t {
    // Splat the scalar across all lanes and defer to the vector form.
    svrsubhnb_s64(op1, svdup_n_s64(op2))
}
#[doc = "Rounding subtract narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnb))]
pub fn svrsubhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t {
    // SAFETY: the operation is sign-agnostic; as_signed/as_unsigned only
    // reinterpret the lane bit patterns.
    unsafe { svrsubhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Rounding subtract narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnb))]
pub fn svrsubhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t {
    // Splat the scalar across all lanes and defer to the vector form.
    svrsubhnb_u16(op1, svdup_n_u16(op2))
}
#[doc = "Rounding subtract narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnb))]
pub fn svrsubhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t {
    // SAFETY: sign-agnostic operation; casts are bit reinterpretations only.
    unsafe { svrsubhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Rounding subtract narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnb))]
pub fn svrsubhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t {
    // Splat the scalar across all lanes and defer to the vector form.
    svrsubhnb_u32(op1, svdup_n_u32(op2))
}
#[doc = "Rounding subtract narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnb))]
pub fn svrsubhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t {
    // SAFETY: sign-agnostic operation; casts are bit reinterpretations only.
    unsafe { svrsubhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Rounding subtract narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnb))]
pub fn svrsubhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t {
    // Splat the scalar across all lanes and defer to the vector form.
    svrsubhnb_u64(op1, svdup_n_u64(op2))
}
#[doc = "Rounding subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnt))]
pub fn svrsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.rsubhnt.nxv8i16"
        )]
        fn _svrsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t;
    }
    // SAFETY: no preconditions beyond the target features enabled on this function.
    unsafe { _svrsubhnt_s16(even, op1, op2) }
}
#[doc = "Rounding subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnt))]
pub fn svrsubhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t {
    // Splat the scalar across all lanes and defer to the vector form.
    svrsubhnt_s16(even, op1, svdup_n_s16(op2))
}
#[doc = "Rounding subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnt))]
pub fn svrsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.rsubhnt.nxv4i32"
        )]
        fn _svrsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t;
    }
    // SAFETY: no preconditions beyond the enabled target features.
    unsafe { _svrsubhnt_s32(even, op1, op2) }
}
#[doc = "Rounding subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnt))]
pub fn svrsubhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t {
    // Splat the scalar across all lanes and defer to the vector form.
    svrsubhnt_s32(even, op1, svdup_n_s32(op2))
}
#[doc = "Rounding subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnt))]
pub fn svrsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.rsubhnt.nxv2i64"
        )]
        fn _svrsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t;
    }
    // SAFETY: no preconditions beyond the enabled target features.
    unsafe { _svrsubhnt_s64(even, op1, op2) }
}
#[doc = "Rounding subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnt))]
pub fn svrsubhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t {
    // Splat the scalar across all lanes and defer to the vector form.
    svrsubhnt_s64(even, op1, svdup_n_s64(op2))
}
#[doc = "Rounding subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnt))]
pub fn svrsubhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t {
    // SAFETY: the operation is sign-agnostic; as_signed/as_unsigned only
    // reinterpret the lane bit patterns.
    unsafe { svrsubhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Rounding subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnt))]
pub fn svrsubhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t {
    // Splat the scalar across all lanes and defer to the vector form.
    svrsubhnt_u16(even, op1, svdup_n_u16(op2))
}
#[doc = "Rounding subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnt))]
pub fn svrsubhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t {
    // SAFETY: sign-agnostic operation; casts are bit reinterpretations only.
    unsafe { svrsubhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Rounding subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnt))]
pub fn svrsubhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t {
    // Splat the scalar across all lanes and defer to the vector form.
    svrsubhnt_u32(even, op1, svdup_n_u32(op2))
}
#[doc = "Rounding subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnt))]
pub fn svrsubhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t {
    // SAFETY: sign-agnostic operation; casts are bit reinterpretations only.
    unsafe { svrsubhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Rounding subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rsubhnt))]
pub fn svrsubhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t {
    // Splat the scalar across all lanes and defer to the vector form.
    svrsubhnt_u64(even, op1, svdup_n_u64(op2))
}
#[doc = "Subtract with borrow long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sbclb))]
pub fn svsbclb_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclb.nxv4i32")]
        fn _svsbclb_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: the LLVM intrinsic is declared over signed vectors;
    // as_signed/as_unsigned only reinterpret the lane bit patterns.
    unsafe { _svsbclb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Subtract with borrow long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sbclb))]
pub fn svsbclb_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    // Splat the scalar across all lanes and defer to the vector form.
    svsbclb_u32(op1, op2, svdup_n_u32(op3))
}
#[doc = "Subtract with borrow long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sbclb))]
pub fn svsbclb_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclb.nxv2i64")]
        fn _svsbclb_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: signed/unsigned casts are bit reinterpretations only.
    unsafe { _svsbclb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Subtract with borrow long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sbclb))]
pub fn svsbclb_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    // Splat the scalar across all lanes and defer to the vector form.
    svsbclb_u64(op1, op2, svdup_n_u64(op3))
}
#[doc = "Subtract with borrow long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sbclt))]
pub fn svsbclt_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclt.nxv4i32")]
        fn _svsbclt_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
    }
    // SAFETY: the LLVM intrinsic is declared over signed vectors;
    // as_signed/as_unsigned only reinterpret the lane bit patterns.
    unsafe { _svsbclt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Subtract with borrow long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sbclt))]
pub fn svsbclt_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    // Splat the scalar across all lanes and defer to the vector form.
    svsbclt_u32(op1, op2, svdup_n_u32(op3))
}
#[doc = "Subtract with borrow long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sbclt))]
pub fn svsbclt_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclt.nxv2i64")]
        fn _svsbclt_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
    }
    // SAFETY: signed/unsigned casts are bit reinterpretations only.
    unsafe { _svsbclt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Subtract with borrow long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sbclt))]
pub fn svsbclt_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    // Splat the scalar across all lanes and defer to the vector form.
    svsbclt_u64(op1, op2, svdup_n_u64(op3))
}
#[doc = "Shift left long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))]
pub fn svshllb_n_s16<const IMM2: i32>(op1: svint8_t) -> svint16_t {
    // Left-shift amount must be in [0, source element width - 1] (8-bit source).
    static_assert_range!(IMM2, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv8i16")]
        fn _svshllb_n_s16(op1: svint8_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: IMM2 is range-checked above; the intrinsic has no other
    // preconditions beyond the target features enabled on this function.
    unsafe { _svshllb_n_s16(op1, IMM2) }
}
#[doc = "Shift left long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))]
pub fn svshllb_n_s32<const IMM2: i32>(op1: svint16_t) -> svint32_t {
    // Left-shift amount must be in [0, source element width - 1] (16-bit source).
    static_assert_range!(IMM2, 0..=15);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv4i32")]
        fn _svshllb_n_s32(op1: svint16_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: IMM2 is range-checked above; no other preconditions.
    unsafe { _svshllb_n_s32(op1, IMM2) }
}
#[doc = "Shift left long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))]
pub fn svshllb_n_s64<const IMM2: i32>(op1: svint32_t) -> svint64_t {
    // Left-shift amount must be in [0, source element width - 1] (32-bit source).
    static_assert_range!(IMM2, 0..=31);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv2i64")]
        fn _svshllb_n_s64(op1: svint32_t, imm2: i32) -> svint64_t;
    }
    // SAFETY: IMM2 is range-checked above; no other preconditions.
    unsafe { _svshllb_n_s64(op1, IMM2) }
}
#[doc = "Shift left long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))]
pub fn svshllb_n_u16<const IMM2: i32>(op1: svuint8_t) -> svuint16_t {
    // Left-shift amount must be in [0, source element width - 1] (8-bit source).
    static_assert_range!(IMM2, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv8i16")]
        fn _svshllb_n_u16(op1: svint8_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: IMM2 is range-checked above; as_signed/as_unsigned only
    // reinterpret the lane bit patterns for the LLVM signature.
    unsafe { _svshllb_n_u16(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Shift left long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))]
pub fn svshllb_n_u32<const IMM2: i32>(op1: svuint16_t) -> svuint32_t {
    // Left-shift amount must be in [0, source element width - 1] (16-bit source).
    static_assert_range!(IMM2, 0..=15);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv4i32")]
        fn _svshllb_n_u32(op1: svint16_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: IMM2 is range-checked above; casts are bit reinterpretations only.
    unsafe { _svshllb_n_u32(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Shift left long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))]
pub fn svshllb_n_u64<const IMM2: i32>(op1: svuint32_t) -> svuint64_t {
    // Left-shift amount must be in [0, source element width - 1] (32-bit source).
    static_assert_range!(IMM2, 0..=31);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv2i64")]
        fn _svshllb_n_u64(op1: svint32_t, imm2: i32) -> svint64_t;
    }
    // SAFETY: IMM2 is range-checked above; casts are bit reinterpretations only.
    unsafe { _svshllb_n_u64(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Shift left long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))]
pub fn svshllt_n_s16<const IMM2: i32>(op1: svint8_t) -> svint16_t {
    // Left-shift amount must be in [0, source element width - 1] (8-bit source).
    static_assert_range!(IMM2, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv8i16")]
        fn _svshllt_n_s16(op1: svint8_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: IMM2 is range-checked above; the intrinsic has no other
    // preconditions beyond the target features enabled on this function.
    unsafe { _svshllt_n_s16(op1, IMM2) }
}
#[doc = "Shift left long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))]
pub fn svshllt_n_s32<const IMM2: i32>(op1: svint16_t) -> svint32_t {
    // Left-shift amount must be in [0, source element width - 1] (16-bit source).
    static_assert_range!(IMM2, 0..=15);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv4i32")]
        fn _svshllt_n_s32(op1: svint16_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: IMM2 is range-checked above; no other preconditions.
    unsafe { _svshllt_n_s32(op1, IMM2) }
}
#[doc = "Shift left long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))]
pub fn svshllt_n_s64<const IMM2: i32>(op1: svint32_t) -> svint64_t {
    // Left-shift amount must be in [0, source element width - 1] (32-bit source).
    static_assert_range!(IMM2, 0..=31);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv2i64")]
        fn _svshllt_n_s64(op1: svint32_t, imm2: i32) -> svint64_t;
    }
    // SAFETY: IMM2 is range-checked above; no other preconditions.
    unsafe { _svshllt_n_s64(op1, IMM2) }
}
#[doc = "Shift left long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))]
pub fn svshllt_n_u16<const IMM2: i32>(op1: svuint8_t) -> svuint16_t {
    // Left-shift amount must be in [0, source element width - 1] (8-bit source).
    static_assert_range!(IMM2, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv8i16")]
        fn _svshllt_n_u16(op1: svint8_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: IMM2 is range-checked above; as_signed/as_unsigned only
    // reinterpret the lane bit patterns for the LLVM signature.
    unsafe { _svshllt_n_u16(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Shift left long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))]
pub fn svshllt_n_u32<const IMM2: i32>(op1: svuint16_t) -> svuint32_t {
    // Left-shift amount must be in [0, source element width - 1] (16-bit source).
    static_assert_range!(IMM2, 0..=15);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv4i32")]
        fn _svshllt_n_u32(op1: svint16_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: IMM2 is range-checked above; casts are bit reinterpretations only.
    unsafe { _svshllt_n_u32(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Shift left long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))]
pub fn svshllt_n_u64<const IMM2: i32>(op1: svuint32_t) -> svuint64_t {
    // Left-shift amount must be in [0, source element width - 1] (32-bit source).
    static_assert_range!(IMM2, 0..=31);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv2i64")]
        fn _svshllt_n_u64(op1: svint32_t, imm2: i32) -> svint64_t;
    }
    // SAFETY: IMM2 is range-checked above; casts are bit reinterpretations only.
    unsafe { _svshllt_n_u64(op1.as_signed(), IMM2).as_unsigned() }
}
#[doc = "Shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
pub fn svshrnb_n_s16<const IMM2: i32>(op1: svint16_t) -> svint8_t {
    // Compile-time check: narrowing right shift moves 1..=8 bits
    // (up to the destination element width).
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv8i16")]
        fn _svshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM2 has been range-checked above.
    unsafe { _svshrnb_n_s16(op1, IMM2) }
}
#[doc = "Shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
pub fn svshrnb_n_s32<const IMM2: i32>(op1: svint32_t) -> svint16_t {
    // Compile-time check: narrowing right shift moves 1..=16 bits
    // (up to the destination element width).
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv4i32")]
        fn _svshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM2 has been range-checked above.
    unsafe { _svshrnb_n_s32(op1, IMM2) }
}
#[doc = "Shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
pub fn svshrnb_n_s64<const IMM2: i32>(op1: svint64_t) -> svint32_t {
    // Compile-time check: narrowing right shift moves 1..=32 bits
    // (up to the destination element width).
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv2i64")]
        fn _svshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM2 has been range-checked above.
    unsafe { _svshrnb_n_s64(op1, IMM2) }
}
#[doc = "Shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
pub fn svshrnb_n_u16<const IMM2: i32>(op1: svuint16_t) -> svuint8_t {
    // Same immediate range as the signed form (1..=8 bits).
    static_assert_range!(IMM2, 1..=8);
    // Signed and unsigned forms lower to the same `shrnb` instruction (see
    // `assert_instr` above), so delegate to the signed wrapper through
    // bit-preserving reinterpret casts.
    unsafe { svshrnb_n_s16::<IMM2>(op1.as_signed()).as_unsigned() }
}
#[doc = "Shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
pub fn svshrnb_n_u32<const IMM2: i32>(op1: svuint32_t) -> svuint16_t {
    // Same immediate range as the signed form (1..=16 bits).
    static_assert_range!(IMM2, 1..=16);
    // Signed and unsigned forms lower to the same `shrnb` instruction (see
    // `assert_instr` above), so delegate to the signed wrapper through
    // bit-preserving reinterpret casts.
    unsafe { svshrnb_n_s32::<IMM2>(op1.as_signed()).as_unsigned() }
}
#[doc = "Shift right narrow (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
pub fn svshrnb_n_u64<const IMM2: i32>(op1: svuint64_t) -> svuint32_t {
    // Same immediate range as the signed form (1..=32 bits).
    static_assert_range!(IMM2, 1..=32);
    // Signed and unsigned forms lower to the same `shrnb` instruction (see
    // `assert_instr` above), so delegate to the signed wrapper through
    // bit-preserving reinterpret casts.
    unsafe { svshrnb_n_s64::<IMM2>(op1.as_signed()).as_unsigned() }
}
#[doc = "Shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
pub fn svshrnt_n_s16<const IMM2: i32>(even: svint8_t, op1: svint16_t) -> svint8_t {
    // `even` is the destination vector whose remaining (even) lanes the
    // "top" form preserves while writing the narrowed results.
    // Compile-time check: shift of 1..=8 bits.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv8i16")]
        fn _svshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM2 has been range-checked above.
    unsafe { _svshrnt_n_s16(even, op1, IMM2) }
}
#[doc = "Shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
pub fn svshrnt_n_s32<const IMM2: i32>(even: svint16_t, op1: svint32_t) -> svint16_t {
    // `even` is the destination vector whose remaining (even) lanes the
    // "top" form preserves while writing the narrowed results.
    // Compile-time check: shift of 1..=16 bits.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv4i32")]
        fn _svshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM2 has been range-checked above.
    unsafe { _svshrnt_n_s32(even, op1, IMM2) }
}
#[doc = "Shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
pub fn svshrnt_n_s64<const IMM2: i32>(even: svint32_t, op1: svint64_t) -> svint32_t {
    // `even` is the destination vector whose remaining (even) lanes the
    // "top" form preserves while writing the narrowed results.
    // Compile-time check: shift of 1..=32 bits.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv2i64")]
        fn _svshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM2 has been range-checked above.
    unsafe { _svshrnt_n_s64(even, op1, IMM2) }
}
#[doc = "Shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
pub fn svshrnt_n_u16<const IMM2: i32>(even: svuint8_t, op1: svuint16_t) -> svuint8_t {
    // Same immediate range as the signed form (1..=8 bits).
    static_assert_range!(IMM2, 1..=8);
    // Signed and unsigned forms lower to the same `shrnt` instruction (see
    // `assert_instr` above); delegate via bit-preserving reinterpret casts.
    unsafe { svshrnt_n_s16::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
}
#[doc = "Shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
pub fn svshrnt_n_u32<const IMM2: i32>(even: svuint16_t, op1: svuint32_t) -> svuint16_t {
    // Same immediate range as the signed form (1..=16 bits).
    static_assert_range!(IMM2, 1..=16);
    // Signed and unsigned forms lower to the same `shrnt` instruction (see
    // `assert_instr` above); delegate via bit-preserving reinterpret casts.
    unsafe { svshrnt_n_s32::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
}
#[doc = "Shift right narrow (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
pub fn svshrnt_n_u64<const IMM2: i32>(even: svuint32_t, op1: svuint64_t) -> svuint32_t {
    // Same immediate range as the signed form (1..=32 bits).
    static_assert_range!(IMM2, 1..=32);
    // Signed and unsigned forms lower to the same `shrnt` instruction (see
    // `assert_instr` above); delegate via bit-preserving reinterpret casts.
    unsafe { svshrnt_n_s64::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
pub fn svsli_n_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Compile-time check: insert shift is 0..=7 (element width minus one).
    static_assert_range!(IMM3, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv16i8")]
        fn _svsli_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM3 has been range-checked above.
    unsafe { _svsli_n_s8(op1, op2, IMM3) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
pub fn svsli_n_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Compile-time check: insert shift is 0..=15 (element width minus one).
    static_assert_range!(IMM3, 0..=15);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv8i16")]
        fn _svsli_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM3 has been range-checked above.
    unsafe { _svsli_n_s16(op1, op2, IMM3) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
pub fn svsli_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Compile-time check: insert shift is 0..=31 (element width minus one).
    static_assert_range!(IMM3, 0..=31);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv4i32")]
        fn _svsli_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM3 has been range-checked above.
    unsafe { _svsli_n_s32(op1, op2, IMM3) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
pub fn svsli_n_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Compile-time check: insert shift is 0..=63 (element width minus one).
    static_assert_range!(IMM3, 0..=63);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv2i64")]
        fn _svsli_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM3 has been range-checked above.
    unsafe { _svsli_n_s64(op1, op2, IMM3) }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
pub fn svsli_n_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Same immediate range as the signed form (0..=7).
    static_assert_range!(IMM3, 0..=7);
    // SLI is a pure bit-insertion and is signedness-agnostic (same `sli`
    // instruction, see `assert_instr`); delegate via reinterpret casts.
    unsafe { svsli_n_s8::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
pub fn svsli_n_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Same immediate range as the signed form (0..=15).
    static_assert_range!(IMM3, 0..=15);
    // SLI is a pure bit-insertion and is signedness-agnostic (same `sli`
    // instruction, see `assert_instr`); delegate via reinterpret casts.
    unsafe { svsli_n_s16::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
pub fn svsli_n_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Same immediate range as the signed form (0..=31).
    static_assert_range!(IMM3, 0..=31);
    // SLI is a pure bit-insertion and is signedness-agnostic (same `sli`
    // instruction, see `assert_instr`); delegate via reinterpret casts.
    unsafe { svsli_n_s32::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Shift left and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
pub fn svsli_n_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Same immediate range as the signed form (0..=63).
    static_assert_range!(IMM3, 0..=63);
    // SLI is a pure bit-insertion and is signedness-agnostic (same `sli`
    // instruction, see `assert_instr`); delegate via reinterpret casts.
    unsafe { svsli_n_s64::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "SM4 encryption and decryption"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsm4e[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-sm4")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sm4e))]
pub fn svsm4e_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Requires the optional `sve2-sm4` extension (see `target_feature`).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sm4e")]
        fn _svsm4e_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // The LLVM intrinsic is declared over signed vectors; reinterpret the
    // unsigned operands/result at the FFI boundary (bit pattern unchanged).
    // SAFETY: the declaration matches the LLVM intrinsic's signature.
    unsafe { _svsm4e_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "SM4 key updates"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsm4ekey[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2,sve2-sm4")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sm4ekey))]
pub fn svsm4ekey_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Requires the optional `sve2-sm4` extension (see `target_feature`).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sm4ekey")]
        fn _svsm4ekey_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // The LLVM intrinsic is declared over signed vectors; reinterpret the
    // unsigned operands/result at the FFI boundary (bit pattern unchanged).
    // SAFETY: the declaration matches the LLVM intrinsic's signature.
    unsafe { _svsm4ekey_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    // Merging (_m) form: lanes where `pg` is inactive keep `op1`'s value.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv16i8")]
        fn _svsqadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // 8-bit elements use the full-width predicate as-is (no `sve_into`
    // conversion, unlike the wider-element variants below). `op1` is
    // reinterpreted at the FFI boundary; `op2` is already signed.
    // SAFETY: the declaration matches the LLVM intrinsic's signature.
    unsafe { _svsqadd_u8_m(pg, op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    // Broadcast the scalar addend to all lanes, then use the vector form.
    svsqadd_u8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so the
    // merging implementation is a valid (and reused) implementation.
    svsqadd_u8_m(pg, op1, op2)
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    // Broadcast the scalar addend to all lanes, then use the vector form.
    svsqadd_u8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
    // Zeroing (_z) form: first zero the inactive lanes of `op1` (select
    // against a zero vector), then apply the merging operation.
    svsqadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
    // Broadcast the scalar addend to all lanes, then use the vector form.
    svsqadd_u8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    // Merging (_m) form: lanes where `pg` is inactive keep `op1`'s value.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv8i16")]
        fn _svsqadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // The intrinsic expects a 16-bit-element predicate (`svbool8_t`), hence
    // the `sve_into` conversion; `op1` is reinterpreted at the FFI boundary.
    // SAFETY: the declaration matches the LLVM intrinsic's signature.
    unsafe { _svsqadd_u16_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    // Broadcast the scalar addend to all lanes, then use the vector form.
    svsqadd_u16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so the
    // merging implementation is a valid (and reused) implementation.
    svsqadd_u16_m(pg, op1, op2)
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    // Broadcast the scalar addend to all lanes, then use the vector form.
    svsqadd_u16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
    // Zeroing (_z) form: first zero the inactive lanes of `op1` (select
    // against a zero vector), then apply the merging operation.
    svsqadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
    // Broadcast the scalar addend to all lanes, then use the vector form.
    svsqadd_u16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    // Merging (_m) form: lanes where `pg` is inactive keep `op1`'s value.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv4i32")]
        fn _svsqadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // The intrinsic expects a 32-bit-element predicate (`svbool4_t`), hence
    // the `sve_into` conversion; `op1` is reinterpreted at the FFI boundary.
    // SAFETY: the declaration matches the LLVM intrinsic's signature.
    unsafe { _svsqadd_u32_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    // Broadcast the scalar addend to all lanes, then use the vector form.
    svsqadd_u32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so the
    // merging implementation is a valid (and reused) implementation.
    svsqadd_u32_m(pg, op1, op2)
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    // Broadcast the scalar addend to all lanes, then use the vector form.
    svsqadd_u32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
    // Zeroing (_z) form: first zero the inactive lanes of `op1` (select
    // against a zero vector), then apply the merging operation.
    svsqadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
    // Broadcast the scalar addend to all lanes, then use the vector form.
    svsqadd_u32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
    // Merging (_m) form: lanes where `pg` is inactive keep `op1`'s value.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv2i64")]
        fn _svsqadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // The intrinsic expects a 64-bit-element predicate (`svbool2_t`), hence
    // the `sve_into` conversion; `op1` is reinterpreted at the FFI boundary.
    // SAFETY: the declaration matches the LLVM intrinsic's signature.
    unsafe { _svsqadd_u64_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
    // Broadcast the scalar addend to all lanes, then use the vector form.
    svsqadd_u64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so the
    // merging implementation is a valid (and reused) implementation.
    svsqadd_u64_m(pg, op1, op2)
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
    // Broadcast the scalar addend to all lanes, then use the vector form.
    svsqadd_u64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
    // Zeroing (_z) form: first zero the inactive lanes of `op1` (select
    // against a zero vector), then apply the merging operation.
    svsqadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Saturating add with signed addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usqadd))]
pub fn svsqadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
    // Broadcast the scalar addend to all lanes, then use the vector form.
    svsqadd_u64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Shift right and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))]
pub fn svsra_n_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Compile-time check: shift of 1..=8 bits (up to the element width).
    static_assert_range!(IMM3, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv16i8")]
        fn _svsra_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM3 has been range-checked above.
    unsafe { _svsra_n_s8(op1, op2, IMM3) }
}
#[doc = "Shift right and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))]
pub fn svsra_n_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Compile-time check: shift of 1..=16 bits (up to the element width).
    static_assert_range!(IMM3, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv8i16")]
        fn _svsra_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM3 has been range-checked above.
    unsafe { _svsra_n_s16(op1, op2, IMM3) }
}
#[doc = "Shift right and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))]
pub fn svsra_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Compile-time check: shift of 1..=32 bits (up to the element width).
    static_assert_range!(IMM3, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv4i32")]
        fn _svsra_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM3 has been range-checked above.
    unsafe { _svsra_n_s32(op1, op2, IMM3) }
}
#[doc = "Shift right and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))]
pub fn svsra_n_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Compile-time check: shift of 1..=64 bits (up to the element width).
    static_assert_range!(IMM3, 1..=64);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv2i64")]
        fn _svsra_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM3 has been range-checked above.
    unsafe { _svsra_n_s64(op1, op2, IMM3) }
}
#[doc = "Shift right and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usra, IMM3 = 1))]
pub fn svsra_n_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Compile-time check: shift of 1..=8 bits (up to the element width).
    static_assert_range!(IMM3, 1..=8);
    // Unsigned variant uses its own intrinsic (`usra`, unsigned shift).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv16i8")]
        fn _svsra_n_u8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
    }
    // The LLVM intrinsic is declared over signed vectors; reinterpret the
    // unsigned operands/result at the FFI boundary (bit pattern unchanged).
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM3 has been range-checked above.
    unsafe { _svsra_n_u8(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
}
#[doc = "Shift right and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usra, IMM3 = 1))]
pub fn svsra_n_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Compile-time check: shift of 1..=16 bits (up to the element width).
    static_assert_range!(IMM3, 1..=16);
    // Unsigned variant uses its own intrinsic (`usra`, unsigned shift).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv8i16")]
        fn _svsra_n_u16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
    }
    // The LLVM intrinsic is declared over signed vectors; reinterpret the
    // unsigned operands/result at the FFI boundary (bit pattern unchanged).
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM3 has been range-checked above.
    unsafe { _svsra_n_u16(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
}
#[doc = "Shift right and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usra, IMM3 = 1))]
pub fn svsra_n_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Compile-time check: shift of 1..=32 bits (up to the element width).
    static_assert_range!(IMM3, 1..=32);
    // Unsigned variant uses its own intrinsic (`usra`, unsigned shift).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv4i32")]
        fn _svsra_n_u32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
    }
    // The LLVM intrinsic is declared over signed vectors; reinterpret the
    // unsigned operands/result at the FFI boundary (bit pattern unchanged).
    // SAFETY: the declaration matches the LLVM intrinsic's signature and
    // IMM3 has been range-checked above.
    unsafe { _svsra_n_u32(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
}
#[doc = "Shift right and accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usra, IMM3 = 1))]
pub fn svsra_n_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // USRA shift immediates must lie in [1, element width in bits].
    static_assert_range!(IMM3, 1..=64);
    unsafe extern "unadjusted" {
        // Signed-typed LLVM binding; unsigned lanes are reinterpreted bit-for-bit.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv2i64")]
        fn _svsra_n_u64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
    }
    unsafe { _svsra_n_u64(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
pub fn svsri_n_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
    // SRI shift immediates must lie in [1, element width in bits].
    static_assert_range!(IMM3, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv16i8")]
        fn _svsri_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
    }
    unsafe { _svsri_n_s8(op1, op2, IMM3) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
pub fn svsri_n_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // SRI shift immediates must lie in [1, element width in bits].
    static_assert_range!(IMM3, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv8i16")]
        fn _svsri_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
    }
    unsafe { _svsri_n_s16(op1, op2, IMM3) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
pub fn svsri_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // SRI shift immediates must lie in [1, element width in bits].
    static_assert_range!(IMM3, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv4i32")]
        fn _svsri_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
    }
    unsafe { _svsri_n_s32(op1, op2, IMM3) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
pub fn svsri_n_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // SRI shift immediates must lie in [1, element width in bits].
    static_assert_range!(IMM3, 1..=64);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv2i64")]
        fn _svsri_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
    }
    unsafe { _svsri_n_s64(op1, op2, IMM3) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
pub fn svsri_n_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    static_assert_range!(IMM3, 1..=8);
    // Delegates to the signed variant, reinterpreting lanes bit-for-bit.
    unsafe { svsri_n_s8::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
pub fn svsri_n_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    static_assert_range!(IMM3, 1..=16);
    // Delegates to the signed variant, reinterpreting lanes bit-for-bit.
    unsafe { svsri_n_s16::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
pub fn svsri_n_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    static_assert_range!(IMM3, 1..=32);
    // Delegates to the signed variant, reinterpreting lanes bit-for-bit.
    unsafe { svsri_n_s32::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
pub fn svsri_n_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    static_assert_range!(IMM3, 1..=64);
    // Delegates to the signed variant, reinterpreting lanes bit-for-bit.
    unsafe { svsri_n_s64::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_f64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_scatter_s64index_f64(
    pg: svbool_t,
    base: *mut f64,
    indices: svint64_t,
    data: svfloat64_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2f64"
        )]
        fn _svstnt1_scatter_s64index_f64(
            data: svfloat64_t,
            pg: svbool2_t,
            base: *mut f64,
            indices: svint64_t,
        );
    }
    // The LLVM intrinsic takes `data` first, so arguments are reordered; the
    // predicate is converted to the 64-bit-element form via sve_into().
    _svstnt1_scatter_s64index_f64(data, pg.sve_into(), base, indices)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_scatter_s64index_s64(
    pg: svbool_t,
    base: *mut i64,
    indices: svint64_t,
    data: svint64_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i64"
        )]
        fn _svstnt1_scatter_s64index_s64(
            data: svint64_t,
            pg: svbool2_t,
            base: *mut i64,
            indices: svint64_t,
        );
    }
    // The LLVM intrinsic takes `data` first, so arguments are reordered; the
    // predicate is converted to the 64-bit-element form via sve_into().
    _svstnt1_scatter_s64index_s64(data, pg.sve_into(), base, indices)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_u64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_scatter_s64index_u64(
    pg: svbool_t,
    base: *mut u64,
    indices: svint64_t,
    data: svuint64_t,
) {
    // Delegates to the i64 form; pointer and data are reinterpreted bit-for-bit.
    svstnt1_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed())
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_f64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_scatter_u64index_f64(
    pg: svbool_t,
    base: *mut f64,
    indices: svuint64_t,
    data: svfloat64_t,
) {
    // Delegates to the s64-index form; indices are reinterpreted bit-for-bit.
    svstnt1_scatter_s64index_f64(pg, base, indices.as_signed(), data)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_scatter_u64index_s64(
    pg: svbool_t,
    base: *mut i64,
    indices: svuint64_t,
    data: svint64_t,
) {
    // Delegates to the s64-index form; indices are reinterpreted bit-for-bit.
    svstnt1_scatter_s64index_s64(pg, base, indices.as_signed(), data)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_u64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_scatter_u64index_u64(
    pg: svbool_t,
    base: *mut u64,
    indices: svuint64_t,
    data: svuint64_t,
) {
    // Delegates to the fully-signed form; all operands reinterpreted bit-for-bit.
    svstnt1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed())
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_f64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_scatter_s64offset_f64(
    pg: svbool_t,
    base: *mut f64,
    offsets: svint64_t,
    data: svfloat64_t,
) {
    unsafe extern "unadjusted" {
        // Unscaled-offset variant: no ".index" component in the intrinsic name.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2f64"
        )]
        fn _svstnt1_scatter_s64offset_f64(
            data: svfloat64_t,
            pg: svbool2_t,
            base: *mut f64,
            offsets: svint64_t,
        );
    }
    // The LLVM intrinsic takes `data` first, so arguments are reordered; the
    // predicate is converted to the 64-bit-element form via sve_into().
    _svstnt1_scatter_s64offset_f64(data, pg.sve_into(), base, offsets)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_scatter_s64offset_s64(
    pg: svbool_t,
    base: *mut i64,
    offsets: svint64_t,
    data: svint64_t,
) {
    unsafe extern "unadjusted" {
        // Unscaled-offset variant: no ".index" component in the intrinsic name.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i64"
        )]
        fn _svstnt1_scatter_s64offset_s64(
            data: svint64_t,
            pg: svbool2_t,
            base: *mut i64,
            offsets: svint64_t,
        );
    }
    // The LLVM intrinsic takes `data` first, so arguments are reordered; the
    // predicate is converted to the 64-bit-element form via sve_into().
    _svstnt1_scatter_s64offset_s64(data, pg.sve_into(), base, offsets)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_u64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_scatter_s64offset_u64(
    pg: svbool_t,
    base: *mut u64,
    offsets: svint64_t,
    data: svuint64_t,
) {
    // Delegates to the i64 form; pointer and data are reinterpreted bit-for-bit.
    svstnt1_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed())
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_f32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1_scatter_u32offset_f32(
    pg: svbool_t,
    base: *mut f32,
    offsets: svuint32_t,
    data: svfloat32_t,
) {
    unsafe extern "unadjusted" {
        // ".uxtw" variant: 32-bit offsets zero-extended for address calculation.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4f32"
        )]
        fn _svstnt1_scatter_u32offset_f32(
            data: svfloat32_t,
            pg: svbool4_t,
            base: *mut f32,
            offsets: svint32_t,
        );
    }
    // The LLVM intrinsic takes `data` first, so arguments are reordered; the
    // predicate is converted to the 32-bit-element form via sve_into().
    _svstnt1_scatter_u32offset_f32(data, pg.sve_into(), base, offsets.as_signed())
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_s32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1_scatter_u32offset_s32(
    pg: svbool_t,
    base: *mut i32,
    offsets: svuint32_t,
    data: svint32_t,
) {
    unsafe extern "unadjusted" {
        // ".uxtw" variant: 32-bit offsets zero-extended for address calculation.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i32"
        )]
        fn _svstnt1_scatter_u32offset_s32(
            data: svint32_t,
            pg: svbool4_t,
            base: *mut i32,
            offsets: svint32_t,
        );
    }
    // The LLVM intrinsic takes `data` first, so arguments are reordered; the
    // predicate is converted to the 32-bit-element form via sve_into().
    _svstnt1_scatter_u32offset_s32(data, pg.sve_into(), base, offsets.as_signed())
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_u32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1_scatter_u32offset_u32(
    pg: svbool_t,
    base: *mut u32,
    offsets: svuint32_t,
    data: svuint32_t,
) {
    // Delegates to the i32 form; pointer and data are reinterpreted bit-for-bit.
    svstnt1_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_f64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_scatter_u64offset_f64(
    pg: svbool_t,
    base: *mut f64,
    offsets: svuint64_t,
    data: svfloat64_t,
) {
    // Delegates to the s64-offset form; offsets are reinterpreted bit-for-bit.
    svstnt1_scatter_s64offset_f64(pg, base, offsets.as_signed(), data)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_scatter_u64offset_s64(
    pg: svbool_t,
    base: *mut i64,
    offsets: svuint64_t,
    data: svint64_t,
) {
    // Delegates to the s64-offset form; offsets are reinterpreted bit-for-bit.
    svstnt1_scatter_s64offset_s64(pg, base, offsets.as_signed(), data)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_u64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_scatter_u64offset_u64(
    pg: svbool_t,
    base: *mut u64,
    offsets: svuint64_t,
    data: svuint64_t,
) {
    // Delegates to the fully-signed form; all operands reinterpreted bit-for-bit.
    svstnt1_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed())
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_f32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1_scatter_u32base_f32(pg: svbool_t, bases: svuint32_t, data: svfloat32_t) {
    // Implemented as the base+offset form with a zero byte offset.
    svstnt1_scatter_u32base_offset_f32(pg, bases, 0, data)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_s32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) {
    // Implemented as the base+offset form with a zero byte offset.
    svstnt1_scatter_u32base_offset_s32(pg, bases, 0, data)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_u32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) {
    // Implemented as the base+offset form with a zero byte offset.
    svstnt1_scatter_u32base_offset_u32(pg, bases, 0, data)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_f64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_scatter_u64base_f64(pg: svbool_t, bases: svuint64_t, data: svfloat64_t) {
    // Implemented as the base+offset form with a zero byte offset.
    svstnt1_scatter_u64base_offset_f64(pg, bases, 0, data)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
    // Implemented as the base+offset form with a zero byte offset.
    svstnt1_scatter_u64base_offset_s64(pg, bases, 0, data)
}
19902#[doc = "Non-truncating store, non-temporal"]
19903#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_u64])"]
19904#[doc = "## Safety"]
19905#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
19906#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
19907#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
19908#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
19909#[inline(always)]
19910#[target_feature(enable = "sve,sve2")]
19911#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
19912#[cfg_attr(test, assert_instr(stnt1d))]
19913pub unsafe fn svstnt1_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
19914 svstnt1_scatter_u64base_offset_u64(pg, bases, 0, data)
19915}
19916#[doc = "Non-truncating store, non-temporal"]
19917#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_f32])"]
19918#[doc = "## Safety"]
19919#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
19920#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
19921#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
19922#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
19923#[inline(always)]
19924#[target_feature(enable = "sve,sve2")]
19925#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
19926#[cfg_attr(test, assert_instr(stnt1w))]
19927pub unsafe fn svstnt1_scatter_u32base_index_f32(
19928 pg: svbool_t,
19929 bases: svuint32_t,
19930 index: i64,
19931 data: svfloat32_t,
19932) {
19933 svstnt1_scatter_u32base_offset_f32(pg, bases, index.unchecked_shl(2), data)
19934}
19935#[doc = "Non-truncating store, non-temporal"]
19936#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_s32])"]
19937#[doc = "## Safety"]
19938#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
19939#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
19940#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
19941#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
19942#[inline(always)]
19943#[target_feature(enable = "sve,sve2")]
19944#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
19945#[cfg_attr(test, assert_instr(stnt1w))]
19946pub unsafe fn svstnt1_scatter_u32base_index_s32(
19947 pg: svbool_t,
19948 bases: svuint32_t,
19949 index: i64,
19950 data: svint32_t,
19951) {
19952 svstnt1_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(2), data)
19953}
19954#[doc = "Non-truncating store, non-temporal"]
19955#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_u32])"]
19956#[doc = "## Safety"]
19957#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
19958#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
19959#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
19960#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
19961#[inline(always)]
19962#[target_feature(enable = "sve,sve2")]
19963#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
19964#[cfg_attr(test, assert_instr(stnt1w))]
19965pub unsafe fn svstnt1_scatter_u32base_index_u32(
19966 pg: svbool_t,
19967 bases: svuint32_t,
19968 index: i64,
19969 data: svuint32_t,
19970) {
19971 svstnt1_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(2), data)
19972}
19973#[doc = "Non-truncating store, non-temporal"]
19974#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_f64])"]
19975#[doc = "## Safety"]
19976#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
19977#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
19978#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
19979#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
19980#[inline(always)]
19981#[target_feature(enable = "sve,sve2")]
19982#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
19983#[cfg_attr(test, assert_instr(stnt1d))]
19984pub unsafe fn svstnt1_scatter_u64base_index_f64(
19985 pg: svbool_t,
19986 bases: svuint64_t,
19987 index: i64,
19988 data: svfloat64_t,
19989) {
19990 svstnt1_scatter_u64base_offset_f64(pg, bases, index.unchecked_shl(3), data)
19991}
19992#[doc = "Non-truncating store, non-temporal"]
19993#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_s64])"]
19994#[doc = "## Safety"]
19995#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
19996#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
19997#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
19998#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
19999#[inline(always)]
20000#[target_feature(enable = "sve,sve2")]
20001#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20002#[cfg_attr(test, assert_instr(stnt1d))]
20003pub unsafe fn svstnt1_scatter_u64base_index_s64(
20004 pg: svbool_t,
20005 bases: svuint64_t,
20006 index: i64,
20007 data: svint64_t,
20008) {
20009 svstnt1_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(3), data)
20010}
20011#[doc = "Non-truncating store, non-temporal"]
20012#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_u64])"]
20013#[doc = "## Safety"]
20014#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20015#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20016#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20017#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20018#[inline(always)]
20019#[target_feature(enable = "sve,sve2")]
20020#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20021#[cfg_attr(test, assert_instr(stnt1d))]
20022pub unsafe fn svstnt1_scatter_u64base_index_u64(
20023 pg: svbool_t,
20024 bases: svuint64_t,
20025 index: i64,
20026 data: svuint64_t,
20027) {
20028 svstnt1_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(3), data)
20029}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_f32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1_scatter_u32base_offset_f32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
    data: svfloat32_t,
) {
    // Raw LLVM intrinsic: note the argument order (data first) differs from the
    // public signature, and the predicate/bases use LLVM-facing types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4f32.nxv4i32"
        )]
        fn _svstnt1_scatter_u32base_offset_f32(
            data: svfloat32_t,
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        );
    }
    // `sve_into` converts the predicate to its 4-lane form; `as_signed`
    // bit-reinterprets the unsigned bases to match the intrinsic signature.
    _svstnt1_scatter_u32base_offset_f32(data, pg.sve_into(), bases.as_signed(), offset)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_s32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1_scatter_u32base_offset_s32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
    data: svint32_t,
) {
    // Raw LLVM intrinsic: note the argument order (data first) differs from the
    // public signature, and the predicate/bases use LLVM-facing types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i32.nxv4i32"
        )]
        fn _svstnt1_scatter_u32base_offset_s32(
            data: svint32_t,
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        );
    }
    // `sve_into` converts the predicate to its 4-lane form; `as_signed`
    // bit-reinterprets the unsigned bases to match the intrinsic signature.
    _svstnt1_scatter_u32base_offset_s32(data, pg.sve_into(), bases.as_signed(), offset)
}
20092#[doc = "Non-truncating store, non-temporal"]
20093#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_u32])"]
20094#[doc = "## Safety"]
20095#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20096#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20097#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20098#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20099#[inline(always)]
20100#[target_feature(enable = "sve,sve2")]
20101#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20102#[cfg_attr(test, assert_instr(stnt1w))]
20103pub unsafe fn svstnt1_scatter_u32base_offset_u32(
20104 pg: svbool_t,
20105 bases: svuint32_t,
20106 offset: i64,
20107 data: svuint32_t,
20108) {
20109 svstnt1_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed())
20110}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_f64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_scatter_u64base_offset_f64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svfloat64_t,
) {
    // Raw LLVM intrinsic: note the argument order (data first) differs from the
    // public signature, and the predicate/bases use LLVM-facing types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2f64.nxv2i64"
        )]
        fn _svstnt1_scatter_u64base_offset_f64(
            data: svfloat64_t,
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        );
    }
    // `sve_into` converts the predicate to its 2-lane form; `as_signed`
    // bit-reinterprets the unsigned bases to match the intrinsic signature.
    _svstnt1_scatter_u64base_offset_f64(data, pg.sve_into(), bases.as_signed(), offset)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_scatter_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svint64_t,
) {
    // Raw LLVM intrinsic: note the argument order (data first) differs from the
    // public signature, and the predicate/bases use LLVM-facing types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i64.nxv2i64"
        )]
        fn _svstnt1_scatter_u64base_offset_s64(
            data: svint64_t,
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        );
    }
    // `sve_into` converts the predicate to its 2-lane form; `as_signed`
    // bit-reinterprets the unsigned bases to match the intrinsic signature.
    _svstnt1_scatter_u64base_offset_s64(data, pg.sve_into(), bases.as_signed(), offset)
}
20173#[doc = "Non-truncating store, non-temporal"]
20174#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_u64])"]
20175#[doc = "## Safety"]
20176#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20177#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20178#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20179#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20180#[inline(always)]
20181#[target_feature(enable = "sve,sve2")]
20182#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20183#[cfg_attr(test, assert_instr(stnt1d))]
20184pub unsafe fn svstnt1_scatter_u64base_offset_u64(
20185 pg: svbool_t,
20186 bases: svuint64_t,
20187 offset: i64,
20188 data: svuint64_t,
20189) {
20190 svstnt1_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
20191}
#[doc = "Truncate to 8 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[s64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1b))]
pub unsafe fn svstnt1b_scatter_s64offset_s64(
    pg: svbool_t,
    base: *mut i8,
    offsets: svint64_t,
    data: svint64_t,
) {
    // Raw LLVM intrinsic: takes the already-narrowed `nxv2i8` payload first,
    // then the 2-lane predicate, base pointer, and per-lane offsets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i8"
        )]
        fn _svstnt1b_scatter_s64offset_s64(
            data: nxv2i8,
            pg: svbool2_t,
            base: *mut i8,
            offsets: svint64_t,
        );
    }
    // `simd_cast` narrows each 64-bit lane to 8 bits; `sve_into` converts the
    // predicate to its 2-lane form.
    _svstnt1b_scatter_s64offset_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        offsets,
    )
}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_s64offset_s64(
    pg: svbool_t,
    base: *mut i16,
    offsets: svint64_t,
    data: svint64_t,
) {
    // Raw LLVM intrinsic: takes the already-narrowed `nxv2i16` payload first,
    // then the 2-lane predicate, base pointer, and per-lane offsets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i16"
        )]
        fn _svstnt1h_scatter_s64offset_s64(
            data: nxv2i16,
            pg: svbool2_t,
            base: *mut i16,
            offsets: svint64_t,
        );
    }
    // `simd_cast` narrows each 64-bit lane to 16 bits; `sve_into` converts the
    // predicate to its 2-lane form.
    _svstnt1h_scatter_s64offset_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        offsets,
    )
}
#[doc = "Truncate to 32 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1w_scatter_s64offset_s64(
    pg: svbool_t,
    base: *mut i32,
    offsets: svint64_t,
    data: svint64_t,
) {
    // Raw LLVM intrinsic: takes the already-narrowed `nxv2i32` payload first,
    // then the 2-lane predicate, base pointer, and per-lane offsets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i32"
        )]
        fn _svstnt1w_scatter_s64offset_s64(
            data: nxv2i32,
            pg: svbool2_t,
            base: *mut i32,
            offsets: svint64_t,
        );
    }
    // `simd_cast` narrows each 64-bit lane to 32 bits; `sve_into` converts the
    // predicate to its 2-lane form.
    _svstnt1w_scatter_s64offset_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        offsets,
    )
}
20297#[doc = "Truncate to 8 bits and store, non-temporal"]
20298#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[s64]offset[_u64])"]
20299#[doc = "## Safety"]
20300#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20301#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20302#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20303#[inline(always)]
20304#[target_feature(enable = "sve,sve2")]
20305#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20306#[cfg_attr(test, assert_instr(stnt1b))]
20307pub unsafe fn svstnt1b_scatter_s64offset_u64(
20308 pg: svbool_t,
20309 base: *mut u8,
20310 offsets: svint64_t,
20311 data: svuint64_t,
20312) {
20313 svstnt1b_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed())
20314}
20315#[doc = "Truncate to 16 bits and store, non-temporal"]
20316#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]offset[_u64])"]
20317#[doc = "## Safety"]
20318#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20319#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20320#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20321#[inline(always)]
20322#[target_feature(enable = "sve,sve2")]
20323#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20324#[cfg_attr(test, assert_instr(stnt1h))]
20325pub unsafe fn svstnt1h_scatter_s64offset_u64(
20326 pg: svbool_t,
20327 base: *mut u16,
20328 offsets: svint64_t,
20329 data: svuint64_t,
20330) {
20331 svstnt1h_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed())
20332}
20333#[doc = "Truncate to 32 bits and store, non-temporal"]
20334#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]offset[_u64])"]
20335#[doc = "## Safety"]
20336#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20337#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20338#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20339#[inline(always)]
20340#[target_feature(enable = "sve,sve2")]
20341#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20342#[cfg_attr(test, assert_instr(stnt1w))]
20343pub unsafe fn svstnt1w_scatter_s64offset_u64(
20344 pg: svbool_t,
20345 base: *mut u32,
20346 offsets: svint64_t,
20347 data: svuint64_t,
20348) {
20349 svstnt1w_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed())
20350}
#[doc = "Truncate to 8 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u32]offset[_s32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1b))]
pub unsafe fn svstnt1b_scatter_u32offset_s32(
    pg: svbool_t,
    base: *mut i8,
    offsets: svuint32_t,
    data: svint32_t,
) {
    // Raw LLVM intrinsic (`uxtw` variant: unsigned 32-bit offsets): takes the
    // already-narrowed `nxv4i8` payload first, then the 4-lane predicate,
    // base pointer, and per-lane offsets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i8"
        )]
        fn _svstnt1b_scatter_u32offset_s32(
            data: nxv4i8,
            pg: svbool4_t,
            base: *mut i8,
            offsets: svint32_t,
        );
    }
    // `simd_cast` narrows each 32-bit lane to 8 bits; `sve_into` converts the
    // predicate to its 4-lane form; `as_signed` bit-reinterprets the offsets.
    _svstnt1b_scatter_u32offset_s32(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        offsets.as_signed(),
    )
}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u32]offset[_s32])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_u32offset_s32(
    pg: svbool_t,
    base: *mut i16,
    offsets: svuint32_t,
    data: svint32_t,
) {
    // Raw LLVM intrinsic (`uxtw` variant: unsigned 32-bit offsets): takes the
    // already-narrowed `nxv4i16` payload first, then the 4-lane predicate,
    // base pointer, and per-lane offsets.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i16"
        )]
        fn _svstnt1h_scatter_u32offset_s32(
            data: nxv4i16,
            pg: svbool4_t,
            base: *mut i16,
            offsets: svint32_t,
        );
    }
    // `simd_cast` narrows each 32-bit lane to 16 bits; `sve_into` converts the
    // predicate to its 4-lane form; `as_signed` bit-reinterprets the offsets.
    _svstnt1h_scatter_u32offset_s32(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        offsets.as_signed(),
    )
}
20421#[doc = "Truncate to 8 bits and store, non-temporal"]
20422#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u32]offset[_u32])"]
20423#[doc = "## Safety"]
20424#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20425#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20426#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20427#[inline(always)]
20428#[target_feature(enable = "sve,sve2")]
20429#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20430#[cfg_attr(test, assert_instr(stnt1b))]
20431pub unsafe fn svstnt1b_scatter_u32offset_u32(
20432 pg: svbool_t,
20433 base: *mut u8,
20434 offsets: svuint32_t,
20435 data: svuint32_t,
20436) {
20437 svstnt1b_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
20438}
20439#[doc = "Truncate to 16 bits and store, non-temporal"]
20440#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u32]offset[_u32])"]
20441#[doc = "## Safety"]
20442#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20443#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20444#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20445#[inline(always)]
20446#[target_feature(enable = "sve,sve2")]
20447#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20448#[cfg_attr(test, assert_instr(stnt1h))]
20449pub unsafe fn svstnt1h_scatter_u32offset_u32(
20450 pg: svbool_t,
20451 base: *mut u16,
20452 offsets: svuint32_t,
20453 data: svuint32_t,
20454) {
20455 svstnt1h_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
20456}
20457#[doc = "Truncate to 8 bits and store, non-temporal"]
20458#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u64]offset[_s64])"]
20459#[doc = "## Safety"]
20460#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20461#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20462#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20463#[inline(always)]
20464#[target_feature(enable = "sve,sve2")]
20465#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20466#[cfg_attr(test, assert_instr(stnt1b))]
20467pub unsafe fn svstnt1b_scatter_u64offset_s64(
20468 pg: svbool_t,
20469 base: *mut i8,
20470 offsets: svuint64_t,
20471 data: svint64_t,
20472) {
20473 svstnt1b_scatter_s64offset_s64(pg, base, offsets.as_signed(), data)
20474}
20475#[doc = "Truncate to 16 bits and store, non-temporal"]
20476#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]offset[_s64])"]
20477#[doc = "## Safety"]
20478#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20479#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20480#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20481#[inline(always)]
20482#[target_feature(enable = "sve,sve2")]
20483#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20484#[cfg_attr(test, assert_instr(stnt1h))]
20485pub unsafe fn svstnt1h_scatter_u64offset_s64(
20486 pg: svbool_t,
20487 base: *mut i16,
20488 offsets: svuint64_t,
20489 data: svint64_t,
20490) {
20491 svstnt1h_scatter_s64offset_s64(pg, base, offsets.as_signed(), data)
20492}
20493#[doc = "Truncate to 32 bits and store, non-temporal"]
20494#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]offset[_s64])"]
20495#[doc = "## Safety"]
20496#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20497#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20498#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20499#[inline(always)]
20500#[target_feature(enable = "sve,sve2")]
20501#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20502#[cfg_attr(test, assert_instr(stnt1w))]
20503pub unsafe fn svstnt1w_scatter_u64offset_s64(
20504 pg: svbool_t,
20505 base: *mut i32,
20506 offsets: svuint64_t,
20507 data: svint64_t,
20508) {
20509 svstnt1w_scatter_s64offset_s64(pg, base, offsets.as_signed(), data)
20510}
20511#[doc = "Truncate to 8 bits and store, non-temporal"]
20512#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u64]offset[_u64])"]
20513#[doc = "## Safety"]
20514#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20515#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20516#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20517#[inline(always)]
20518#[target_feature(enable = "sve,sve2")]
20519#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20520#[cfg_attr(test, assert_instr(stnt1b))]
20521pub unsafe fn svstnt1b_scatter_u64offset_u64(
20522 pg: svbool_t,
20523 base: *mut u8,
20524 offsets: svuint64_t,
20525 data: svuint64_t,
20526) {
20527 svstnt1b_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed())
20528}
20529#[doc = "Truncate to 16 bits and store, non-temporal"]
20530#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]offset[_u64])"]
20531#[doc = "## Safety"]
20532#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20533#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20534#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20535#[inline(always)]
20536#[target_feature(enable = "sve,sve2")]
20537#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20538#[cfg_attr(test, assert_instr(stnt1h))]
20539pub unsafe fn svstnt1h_scatter_u64offset_u64(
20540 pg: svbool_t,
20541 base: *mut u16,
20542 offsets: svuint64_t,
20543 data: svuint64_t,
20544) {
20545 svstnt1h_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed())
20546}
20547#[doc = "Truncate to 32 bits and store, non-temporal"]
20548#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]offset[_u64])"]
20549#[doc = "## Safety"]
20550#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20551#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20552#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20553#[inline(always)]
20554#[target_feature(enable = "sve,sve2")]
20555#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20556#[cfg_attr(test, assert_instr(stnt1w))]
20557pub unsafe fn svstnt1w_scatter_u64offset_u64(
20558 pg: svbool_t,
20559 base: *mut u32,
20560 offsets: svuint64_t,
20561 data: svuint64_t,
20562) {
20563 svstnt1w_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed())
20564}
20565#[doc = "Truncate to 8 bits and store, non-temporal"]
20566#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base]_offset[_s32])"]
20567#[doc = "## Safety"]
20568#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20569#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20570#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20571#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20572#[inline(always)]
20573#[target_feature(enable = "sve,sve2")]
20574#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20575#[cfg_attr(test, assert_instr(stnt1b))]
20576pub unsafe fn svstnt1b_scatter_u32base_offset_s32(
20577 pg: svbool_t,
20578 bases: svuint32_t,
20579 offset: i64,
20580 data: svint32_t,
20581) {
20582 unsafe extern "unadjusted" {
20583 #[cfg_attr(
20584 target_arch = "aarch64",
20585 link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i8.nxv4i32"
20586 )]
20587 fn _svstnt1b_scatter_u32base_offset_s32(
20588 data: nxv4i8,
20589 pg: svbool4_t,
20590 bases: svint32_t,
20591 offset: i64,
20592 );
20593 }
20594 _svstnt1b_scatter_u32base_offset_s32(
20595 crate::intrinsics::simd::simd_cast(data),
20596 pg.sve_into(),
20597 bases.as_signed(),
20598 offset,
20599 )
20600}
20601#[doc = "Truncate to 16 bits and store, non-temporal"]
20602#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_offset[_s32])"]
20603#[doc = "## Safety"]
20604#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20605#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20606#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20607#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20608#[inline(always)]
20609#[target_feature(enable = "sve,sve2")]
20610#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20611#[cfg_attr(test, assert_instr(stnt1h))]
20612pub unsafe fn svstnt1h_scatter_u32base_offset_s32(
20613 pg: svbool_t,
20614 bases: svuint32_t,
20615 offset: i64,
20616 data: svint32_t,
20617) {
20618 unsafe extern "unadjusted" {
20619 #[cfg_attr(
20620 target_arch = "aarch64",
20621 link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i16.nxv4i32"
20622 )]
20623 fn _svstnt1h_scatter_u32base_offset_s32(
20624 data: nxv4i16,
20625 pg: svbool4_t,
20626 bases: svint32_t,
20627 offset: i64,
20628 );
20629 }
20630 _svstnt1h_scatter_u32base_offset_s32(
20631 crate::intrinsics::simd::simd_cast(data),
20632 pg.sve_into(),
20633 bases.as_signed(),
20634 offset,
20635 )
20636}
20637#[doc = "Truncate to 8 bits and store, non-temporal"]
20638#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base]_offset[_u32])"]
20639#[doc = "## Safety"]
20640#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20641#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20642#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20643#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20644#[inline(always)]
20645#[target_feature(enable = "sve,sve2")]
20646#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20647#[cfg_attr(test, assert_instr(stnt1b))]
20648pub unsafe fn svstnt1b_scatter_u32base_offset_u32(
20649 pg: svbool_t,
20650 bases: svuint32_t,
20651 offset: i64,
20652 data: svuint32_t,
20653) {
20654 svstnt1b_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed())
20655}
20656#[doc = "Truncate to 16 bits and store, non-temporal"]
20657#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_offset[_u32])"]
20658#[doc = "## Safety"]
20659#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20660#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20661#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20662#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20663#[inline(always)]
20664#[target_feature(enable = "sve,sve2")]
20665#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20666#[cfg_attr(test, assert_instr(stnt1h))]
20667pub unsafe fn svstnt1h_scatter_u32base_offset_u32(
20668 pg: svbool_t,
20669 bases: svuint32_t,
20670 offset: i64,
20671 data: svuint32_t,
20672) {
20673 svstnt1h_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed())
20674}
20675#[doc = "Truncate to 8 bits and store, non-temporal"]
20676#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base]_offset[_s64])"]
20677#[doc = "## Safety"]
20678#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20679#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20680#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20681#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20682#[inline(always)]
20683#[target_feature(enable = "sve,sve2")]
20684#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20685#[cfg_attr(test, assert_instr(stnt1b))]
20686pub unsafe fn svstnt1b_scatter_u64base_offset_s64(
20687 pg: svbool_t,
20688 bases: svuint64_t,
20689 offset: i64,
20690 data: svint64_t,
20691) {
20692 unsafe extern "unadjusted" {
20693 #[cfg_attr(
20694 target_arch = "aarch64",
20695 link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i8.nxv2i64"
20696 )]
20697 fn _svstnt1b_scatter_u64base_offset_s64(
20698 data: nxv2i8,
20699 pg: svbool2_t,
20700 bases: svint64_t,
20701 offset: i64,
20702 );
20703 }
20704 _svstnt1b_scatter_u64base_offset_s64(
20705 crate::intrinsics::simd::simd_cast(data),
20706 pg.sve_into(),
20707 bases.as_signed(),
20708 offset,
20709 )
20710}
20711#[doc = "Truncate to 16 bits and store, non-temporal"]
20712#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_offset[_s64])"]
20713#[doc = "## Safety"]
20714#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20715#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20716#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20717#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20718#[inline(always)]
20719#[target_feature(enable = "sve,sve2")]
20720#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20721#[cfg_attr(test, assert_instr(stnt1h))]
20722pub unsafe fn svstnt1h_scatter_u64base_offset_s64(
20723 pg: svbool_t,
20724 bases: svuint64_t,
20725 offset: i64,
20726 data: svint64_t,
20727) {
20728 unsafe extern "unadjusted" {
20729 #[cfg_attr(
20730 target_arch = "aarch64",
20731 link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i16.nxv2i64"
20732 )]
20733 fn _svstnt1h_scatter_u64base_offset_s64(
20734 data: nxv2i16,
20735 pg: svbool2_t,
20736 bases: svint64_t,
20737 offset: i64,
20738 );
20739 }
20740 _svstnt1h_scatter_u64base_offset_s64(
20741 crate::intrinsics::simd::simd_cast(data),
20742 pg.sve_into(),
20743 bases.as_signed(),
20744 offset,
20745 )
20746}
20747#[doc = "Truncate to 32 bits and store, non-temporal"]
20748#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_offset[_s64])"]
20749#[doc = "## Safety"]
20750#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20751#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20752#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20753#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20754#[inline(always)]
20755#[target_feature(enable = "sve,sve2")]
20756#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20757#[cfg_attr(test, assert_instr(stnt1w))]
20758pub unsafe fn svstnt1w_scatter_u64base_offset_s64(
20759 pg: svbool_t,
20760 bases: svuint64_t,
20761 offset: i64,
20762 data: svint64_t,
20763) {
20764 unsafe extern "unadjusted" {
20765 #[cfg_attr(
20766 target_arch = "aarch64",
20767 link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i32.nxv2i64"
20768 )]
20769 fn _svstnt1w_scatter_u64base_offset_s64(
20770 data: nxv2i32,
20771 pg: svbool2_t,
20772 bases: svint64_t,
20773 offset: i64,
20774 );
20775 }
20776 _svstnt1w_scatter_u64base_offset_s64(
20777 crate::intrinsics::simd::simd_cast(data),
20778 pg.sve_into(),
20779 bases.as_signed(),
20780 offset,
20781 )
20782}
20783#[doc = "Truncate to 8 bits and store, non-temporal"]
20784#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base]_offset[_u64])"]
20785#[doc = "## Safety"]
20786#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20787#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20788#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20789#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20790#[inline(always)]
20791#[target_feature(enable = "sve,sve2")]
20792#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20793#[cfg_attr(test, assert_instr(stnt1b))]
20794pub unsafe fn svstnt1b_scatter_u64base_offset_u64(
20795 pg: svbool_t,
20796 bases: svuint64_t,
20797 offset: i64,
20798 data: svuint64_t,
20799) {
20800 svstnt1b_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
20801}
20802#[doc = "Truncate to 16 bits and store, non-temporal"]
20803#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_offset[_u64])"]
20804#[doc = "## Safety"]
20805#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20806#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20807#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20808#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20809#[inline(always)]
20810#[target_feature(enable = "sve,sve2")]
20811#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20812#[cfg_attr(test, assert_instr(stnt1h))]
20813pub unsafe fn svstnt1h_scatter_u64base_offset_u64(
20814 pg: svbool_t,
20815 bases: svuint64_t,
20816 offset: i64,
20817 data: svuint64_t,
20818) {
20819 svstnt1h_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
20820}
20821#[doc = "Truncate to 32 bits and store, non-temporal"]
20822#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_offset[_u64])"]
20823#[doc = "## Safety"]
20824#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20825#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20826#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20827#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20828#[inline(always)]
20829#[target_feature(enable = "sve,sve2")]
20830#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20831#[cfg_attr(test, assert_instr(stnt1w))]
20832pub unsafe fn svstnt1w_scatter_u64base_offset_u64(
20833 pg: svbool_t,
20834 bases: svuint64_t,
20835 offset: i64,
20836 data: svuint64_t,
20837) {
20838 svstnt1w_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
20839}
20840#[doc = "Truncate to 8 bits and store, non-temporal"]
20841#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base_s32])"]
20842#[doc = "## Safety"]
20843#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20844#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20845#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20846#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20847#[inline(always)]
20848#[target_feature(enable = "sve,sve2")]
20849#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20850#[cfg_attr(test, assert_instr(stnt1b))]
20851pub unsafe fn svstnt1b_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) {
20852 svstnt1b_scatter_u32base_offset_s32(pg, bases, 0, data)
20853}
20854#[doc = "Truncate to 16 bits and store, non-temporal"]
20855#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base_s32])"]
20856#[doc = "## Safety"]
20857#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20858#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20859#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20860#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20861#[inline(always)]
20862#[target_feature(enable = "sve,sve2")]
20863#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20864#[cfg_attr(test, assert_instr(stnt1h))]
20865pub unsafe fn svstnt1h_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) {
20866 svstnt1h_scatter_u32base_offset_s32(pg, bases, 0, data)
20867}
20868#[doc = "Truncate to 8 bits and store, non-temporal"]
20869#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base_u32])"]
20870#[doc = "## Safety"]
20871#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20872#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20873#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20874#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20875#[inline(always)]
20876#[target_feature(enable = "sve,sve2")]
20877#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20878#[cfg_attr(test, assert_instr(stnt1b))]
20879pub unsafe fn svstnt1b_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) {
20880 svstnt1b_scatter_u32base_offset_u32(pg, bases, 0, data)
20881}
20882#[doc = "Truncate to 16 bits and store, non-temporal"]
20883#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base_u32])"]
20884#[doc = "## Safety"]
20885#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20886#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20887#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20888#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20889#[inline(always)]
20890#[target_feature(enable = "sve,sve2")]
20891#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20892#[cfg_attr(test, assert_instr(stnt1h))]
20893pub unsafe fn svstnt1h_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) {
20894 svstnt1h_scatter_u32base_offset_u32(pg, bases, 0, data)
20895}
20896#[doc = "Truncate to 8 bits and store, non-temporal"]
20897#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base_s64])"]
20898#[doc = "## Safety"]
20899#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20900#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20901#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20902#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20903#[inline(always)]
20904#[target_feature(enable = "sve,sve2")]
20905#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20906#[cfg_attr(test, assert_instr(stnt1b))]
20907pub unsafe fn svstnt1b_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
20908 svstnt1b_scatter_u64base_offset_s64(pg, bases, 0, data)
20909}
20910#[doc = "Truncate to 16 bits and store, non-temporal"]
20911#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base_s64])"]
20912#[doc = "## Safety"]
20913#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20914#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20915#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20916#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20917#[inline(always)]
20918#[target_feature(enable = "sve,sve2")]
20919#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20920#[cfg_attr(test, assert_instr(stnt1h))]
20921pub unsafe fn svstnt1h_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
20922 svstnt1h_scatter_u64base_offset_s64(pg, bases, 0, data)
20923}
20924#[doc = "Truncate to 32 bits and store, non-temporal"]
20925#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base_s64])"]
20926#[doc = "## Safety"]
20927#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20928#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20929#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20930#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20931#[inline(always)]
20932#[target_feature(enable = "sve,sve2")]
20933#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20934#[cfg_attr(test, assert_instr(stnt1w))]
20935pub unsafe fn svstnt1w_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
20936 svstnt1w_scatter_u64base_offset_s64(pg, bases, 0, data)
20937}
20938#[doc = "Truncate to 8 bits and store, non-temporal"]
20939#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base_u64])"]
20940#[doc = "## Safety"]
20941#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20942#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20943#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20944#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20945#[inline(always)]
20946#[target_feature(enable = "sve,sve2")]
20947#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20948#[cfg_attr(test, assert_instr(stnt1b))]
20949pub unsafe fn svstnt1b_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
20950 svstnt1b_scatter_u64base_offset_u64(pg, bases, 0, data)
20951}
20952#[doc = "Truncate to 16 bits and store, non-temporal"]
20953#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base_u64])"]
20954#[doc = "## Safety"]
20955#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20956#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20957#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20958#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20959#[inline(always)]
20960#[target_feature(enable = "sve,sve2")]
20961#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20962#[cfg_attr(test, assert_instr(stnt1h))]
20963pub unsafe fn svstnt1h_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
20964 svstnt1h_scatter_u64base_offset_u64(pg, bases, 0, data)
20965}
20966#[doc = "Truncate to 32 bits and store, non-temporal"]
20967#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base_u64])"]
20968#[doc = "## Safety"]
20969#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
20970#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
20971#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
20972#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
20973#[inline(always)]
20974#[target_feature(enable = "sve,sve2")]
20975#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20976#[cfg_attr(test, assert_instr(stnt1w))]
20977pub unsafe fn svstnt1w_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
20978 svstnt1w_scatter_u64base_offset_u64(pg, bases, 0, data)
20979}
#[doc = "Truncate to 16 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]index[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1h_scatter_s64index_s64(
    pg: svbool_t,
    base: *mut i16,
    indices: svint64_t,
    data: svint64_t,
) {
    // Raw binding to the LLVM intrinsic. Note that the intrinsic's parameter
    // order is (data, pg, base, indices), unlike the public signature.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i16"
        )]
        fn _svstnt1h_scatter_s64index_s64(
            data: nxv2i16,
            pg: svbool2_t,
            base: *mut i16,
            indices: svint64_t,
        );
    }
    // `simd_cast` narrows each 64-bit data lane to 16 bits (`nxv2i16`), and
    // `sve_into` converts the predicate to the two-lane form (`svbool2_t`)
    // that the intrinsic expects.
    _svstnt1h_scatter_s64index_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        indices,
    )
}
#[doc = "Truncate to 32 bits and store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]index[_s64])"]
#[doc = "## Safety"]
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1w_scatter_s64index_s64(
    pg: svbool_t,
    base: *mut i32,
    indices: svint64_t,
    data: svint64_t,
) {
    // Raw binding to the LLVM intrinsic. Note that the intrinsic's parameter
    // order is (data, pg, base, indices), unlike the public signature.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i32"
        )]
        fn _svstnt1w_scatter_s64index_s64(
            data: nxv2i32,
            pg: svbool2_t,
            base: *mut i32,
            indices: svint64_t,
        );
    }
    // `simd_cast` narrows each 64-bit data lane to 32 bits (`nxv2i32`), and
    // `sve_into` converts the predicate to the two-lane form (`svbool2_t`)
    // that the intrinsic expects.
    _svstnt1w_scatter_s64index_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        indices,
    )
}
21050#[doc = "Truncate to 16 bits and store, non-temporal"]
21051#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]index[_u64])"]
21052#[doc = "## Safety"]
21053#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
21054#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
21055#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
21056#[inline(always)]
21057#[target_feature(enable = "sve,sve2")]
21058#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21059#[cfg_attr(test, assert_instr(stnt1h))]
21060pub unsafe fn svstnt1h_scatter_s64index_u64(
21061 pg: svbool_t,
21062 base: *mut u16,
21063 indices: svint64_t,
21064 data: svuint64_t,
21065) {
21066 svstnt1h_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed())
21067}
21068#[doc = "Truncate to 32 bits and store, non-temporal"]
21069#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]index[_u64])"]
21070#[doc = "## Safety"]
21071#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
21072#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
21073#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
21074#[inline(always)]
21075#[target_feature(enable = "sve,sve2")]
21076#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21077#[cfg_attr(test, assert_instr(stnt1w))]
21078pub unsafe fn svstnt1w_scatter_s64index_u64(
21079 pg: svbool_t,
21080 base: *mut u32,
21081 indices: svint64_t,
21082 data: svuint64_t,
21083) {
21084 svstnt1w_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed())
21085}
21086#[doc = "Truncate to 16 bits and store, non-temporal"]
21087#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]index[_s64])"]
21088#[doc = "## Safety"]
21089#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
21090#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
21091#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
21092#[inline(always)]
21093#[target_feature(enable = "sve,sve2")]
21094#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21095#[cfg_attr(test, assert_instr(stnt1h))]
21096pub unsafe fn svstnt1h_scatter_u64index_s64(
21097 pg: svbool_t,
21098 base: *mut i16,
21099 indices: svuint64_t,
21100 data: svint64_t,
21101) {
21102 svstnt1h_scatter_s64index_s64(pg, base, indices.as_signed(), data)
21103}
21104#[doc = "Truncate to 32 bits and store, non-temporal"]
21105#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]index[_s64])"]
21106#[doc = "## Safety"]
21107#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
21108#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
21109#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
21110#[inline(always)]
21111#[target_feature(enable = "sve,sve2")]
21112#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21113#[cfg_attr(test, assert_instr(stnt1w))]
21114pub unsafe fn svstnt1w_scatter_u64index_s64(
21115 pg: svbool_t,
21116 base: *mut i32,
21117 indices: svuint64_t,
21118 data: svint64_t,
21119) {
21120 svstnt1w_scatter_s64index_s64(pg, base, indices.as_signed(), data)
21121}
21122#[doc = "Truncate to 16 bits and store, non-temporal"]
21123#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]index[_u64])"]
21124#[doc = "## Safety"]
21125#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
21126#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
21127#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
21128#[inline(always)]
21129#[target_feature(enable = "sve,sve2")]
21130#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21131#[cfg_attr(test, assert_instr(stnt1h))]
21132pub unsafe fn svstnt1h_scatter_u64index_u64(
21133 pg: svbool_t,
21134 base: *mut u16,
21135 indices: svuint64_t,
21136 data: svuint64_t,
21137) {
21138 svstnt1h_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed())
21139}
21140#[doc = "Truncate to 32 bits and store, non-temporal"]
21141#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]index[_u64])"]
21142#[doc = "## Safety"]
21143#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
21144#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
21145#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
21146#[inline(always)]
21147#[target_feature(enable = "sve,sve2")]
21148#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21149#[cfg_attr(test, assert_instr(stnt1w))]
21150pub unsafe fn svstnt1w_scatter_u64index_u64(
21151 pg: svbool_t,
21152 base: *mut u32,
21153 indices: svuint64_t,
21154 data: svuint64_t,
21155) {
21156 svstnt1w_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed())
21157}
21158#[doc = "Truncate to 16 bits and store, non-temporal"]
21159#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_index[_s32])"]
21160#[doc = "## Safety"]
21161#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
21162#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
21163#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
21164#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
21165#[inline(always)]
21166#[target_feature(enable = "sve,sve2")]
21167#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21168#[cfg_attr(test, assert_instr(stnt1h))]
21169pub unsafe fn svstnt1h_scatter_u32base_index_s32(
21170 pg: svbool_t,
21171 bases: svuint32_t,
21172 index: i64,
21173 data: svint32_t,
21174) {
21175 svstnt1h_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(1), data)
21176}
21177#[doc = "Truncate to 16 bits and store, non-temporal"]
21178#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_index[_u32])"]
21179#[doc = "## Safety"]
21180#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
21181#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
21182#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
21183#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
21184#[inline(always)]
21185#[target_feature(enable = "sve,sve2")]
21186#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21187#[cfg_attr(test, assert_instr(stnt1h))]
21188pub unsafe fn svstnt1h_scatter_u32base_index_u32(
21189 pg: svbool_t,
21190 bases: svuint32_t,
21191 index: i64,
21192 data: svuint32_t,
21193) {
21194 svstnt1h_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(1), data)
21195}
21196#[doc = "Truncate to 16 bits and store, non-temporal"]
21197#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_index[_s64])"]
21198#[doc = "## Safety"]
21199#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
21200#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
21201#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
21202#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
21203#[inline(always)]
21204#[target_feature(enable = "sve,sve2")]
21205#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21206#[cfg_attr(test, assert_instr(stnt1h))]
21207pub unsafe fn svstnt1h_scatter_u64base_index_s64(
21208 pg: svbool_t,
21209 bases: svuint64_t,
21210 index: i64,
21211 data: svint64_t,
21212) {
21213 svstnt1h_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(1), data)
21214}
21215#[doc = "Truncate to 32 bits and store, non-temporal"]
21216#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_index[_s64])"]
21217#[doc = "## Safety"]
21218#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
21219#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
21220#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
21221#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
21222#[inline(always)]
21223#[target_feature(enable = "sve,sve2")]
21224#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21225#[cfg_attr(test, assert_instr(stnt1w))]
21226pub unsafe fn svstnt1w_scatter_u64base_index_s64(
21227 pg: svbool_t,
21228 bases: svuint64_t,
21229 index: i64,
21230 data: svint64_t,
21231) {
21232 svstnt1w_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(2), data)
21233}
21234#[doc = "Truncate to 16 bits and store, non-temporal"]
21235#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_index[_u64])"]
21236#[doc = "## Safety"]
21237#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
21238#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
21239#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
21240#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
21241#[inline(always)]
21242#[target_feature(enable = "sve,sve2")]
21243#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21244#[cfg_attr(test, assert_instr(stnt1h))]
21245pub unsafe fn svstnt1h_scatter_u64base_index_u64(
21246 pg: svbool_t,
21247 bases: svuint64_t,
21248 index: i64,
21249 data: svuint64_t,
21250) {
21251 svstnt1h_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(1), data)
21252}
21253#[doc = "Truncate to 32 bits and store, non-temporal"]
21254#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_index[_u64])"]
21255#[doc = "## Safety"]
21256#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
21257#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
21258#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
21259#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
21260#[inline(always)]
21261#[target_feature(enable = "sve,sve2")]
21262#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21263#[cfg_attr(test, assert_instr(stnt1w))]
21264pub unsafe fn svstnt1w_scatter_u64base_index_u64(
21265 pg: svbool_t,
21266 bases: svuint64_t,
21267 index: i64,
21268 data: svuint64_t,
21269) {
21270 svstnt1w_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(2), data)
21271}
#[doc = "Subtract narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subhnb))]
pub fn svsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t {
    // Raw binding to the LLVM SVE2 SUBHNB intrinsic, which narrows the
    // 16-bit subtraction results into the bottom (even) 8-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv8i16")]
        fn _svsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `sve2` target
    // feature, which `#[target_feature]` on this function guarantees.
    unsafe { _svsubhnb_s16(op1, op2) }
}
21285#[doc = "Subtract narrow high part (bottom)"]
21286#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s16])"]
21287#[inline(always)]
21288#[target_feature(enable = "sve,sve2")]
21289#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21290#[cfg_attr(test, assert_instr(subhnb))]
21291pub fn svsubhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t {
21292 svsubhnb_s16(op1, svdup_n_s16(op2))
21293}
#[doc = "Subtract narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subhnb))]
pub fn svsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t {
    // Raw binding to the LLVM SVE2 SUBHNB intrinsic, which narrows the
    // 32-bit subtraction results into the bottom (even) 16-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv4i32")]
        fn _svsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `sve2` target
    // feature, which `#[target_feature]` on this function guarantees.
    unsafe { _svsubhnb_s32(op1, op2) }
}
21307#[doc = "Subtract narrow high part (bottom)"]
21308#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s32])"]
21309#[inline(always)]
21310#[target_feature(enable = "sve,sve2")]
21311#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21312#[cfg_attr(test, assert_instr(subhnb))]
21313pub fn svsubhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t {
21314 svsubhnb_s32(op1, svdup_n_s32(op2))
21315}
#[doc = "Subtract narrow high part (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subhnb))]
pub fn svsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t {
    // Raw binding to the LLVM SVE2 SUBHNB intrinsic, which narrows the
    // 64-bit subtraction results into the bottom (even) 32-bit lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv2i64")]
        fn _svsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `sve2` target
    // feature, which `#[target_feature]` on this function guarantees.
    unsafe { _svsubhnb_s64(op1, op2) }
}
21329#[doc = "Subtract narrow high part (bottom)"]
21330#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s64])"]
21331#[inline(always)]
21332#[target_feature(enable = "sve,sve2")]
21333#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21334#[cfg_attr(test, assert_instr(subhnb))]
21335pub fn svsubhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t {
21336 svsubhnb_s64(op1, svdup_n_s64(op2))
21337}
21338#[doc = "Subtract narrow high part (bottom)"]
21339#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u16])"]
21340#[inline(always)]
21341#[target_feature(enable = "sve,sve2")]
21342#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21343#[cfg_attr(test, assert_instr(subhnb))]
21344pub fn svsubhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t {
21345 unsafe { svsubhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
21346}
21347#[doc = "Subtract narrow high part (bottom)"]
21348#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u16])"]
21349#[inline(always)]
21350#[target_feature(enable = "sve,sve2")]
21351#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21352#[cfg_attr(test, assert_instr(subhnb))]
21353pub fn svsubhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t {
21354 svsubhnb_u16(op1, svdup_n_u16(op2))
21355}
21356#[doc = "Subtract narrow high part (bottom)"]
21357#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u32])"]
21358#[inline(always)]
21359#[target_feature(enable = "sve,sve2")]
21360#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21361#[cfg_attr(test, assert_instr(subhnb))]
21362pub fn svsubhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t {
21363 unsafe { svsubhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
21364}
21365#[doc = "Subtract narrow high part (bottom)"]
21366#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u32])"]
21367#[inline(always)]
21368#[target_feature(enable = "sve,sve2")]
21369#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21370#[cfg_attr(test, assert_instr(subhnb))]
21371pub fn svsubhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t {
21372 svsubhnb_u32(op1, svdup_n_u32(op2))
21373}
21374#[doc = "Subtract narrow high part (bottom)"]
21375#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u64])"]
21376#[inline(always)]
21377#[target_feature(enable = "sve,sve2")]
21378#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21379#[cfg_attr(test, assert_instr(subhnb))]
21380pub fn svsubhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t {
21381 unsafe { svsubhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
21382}
21383#[doc = "Subtract narrow high part (bottom)"]
21384#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u64])"]
21385#[inline(always)]
21386#[target_feature(enable = "sve,sve2")]
21387#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21388#[cfg_attr(test, assert_instr(subhnb))]
21389pub fn svsubhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t {
21390 svsubhnb_u64(op1, svdup_n_u64(op2))
21391}
#[doc = "Subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subhnt))]
pub fn svsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t {
    // Raw binding to the LLVM SVE2 SUBHNT intrinsic: the "top" variant writes
    // the narrowed results into the odd lanes, with `even` supplying the rest.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv8i16")]
        fn _svsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `sve2` target
    // feature, which `#[target_feature]` on this function guarantees.
    unsafe { _svsubhnt_s16(even, op1, op2) }
}
21405#[doc = "Subtract narrow high part (top)"]
21406#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s16])"]
21407#[inline(always)]
21408#[target_feature(enable = "sve,sve2")]
21409#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21410#[cfg_attr(test, assert_instr(subhnt))]
21411pub fn svsubhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t {
21412 svsubhnt_s16(even, op1, svdup_n_s16(op2))
21413}
#[doc = "Subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subhnt))]
pub fn svsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t {
    // Raw binding to the LLVM SVE2 SUBHNT intrinsic: the "top" variant writes
    // the narrowed results into the odd lanes, with `even` supplying the rest.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv4i32")]
        fn _svsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `sve2` target
    // feature, which `#[target_feature]` on this function guarantees.
    unsafe { _svsubhnt_s32(even, op1, op2) }
}
#[doc = "Subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subhnt))]
pub fn svsubhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t {
    // Splat the scalar `op2` across a vector and defer to the vector form.
    svsubhnt_s32(even, op1, svdup_n_s32(op2))
}
#[doc = "Subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subhnt))]
pub fn svsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // element-width-specific (nxv2i64) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv2i64")]
        fn _svsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsubhnt_s64(even, op1, op2) }
}
#[doc = "Subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subhnt))]
pub fn svsubhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t {
    // Splat the scalar `op2` across a vector and defer to the vector form.
    svsubhnt_s64(even, op1, svdup_n_s64(op2))
}
#[doc = "Subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subhnt))]
pub fn svsubhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t {
    // Implemented on top of the signed binding: as_signed()/as_unsigned()
    // are bit-preserving reinterprets, so no value conversion occurs.
    unsafe { svsubhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subhnt))]
pub fn svsubhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t {
    // Splat the scalar `op2` across a vector and defer to the vector form.
    svsubhnt_u16(even, op1, svdup_n_u16(op2))
}
#[doc = "Subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subhnt))]
pub fn svsubhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t {
    // Implemented on top of the signed binding: as_signed()/as_unsigned()
    // are bit-preserving reinterprets, so no value conversion occurs.
    unsafe { svsubhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subhnt))]
pub fn svsubhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t {
    // Splat the scalar `op2` across a vector and defer to the vector form.
    svsubhnt_u32(even, op1, svdup_n_u32(op2))
}
#[doc = "Subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subhnt))]
pub fn svsubhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t {
    // Implemented on top of the signed binding: as_signed()/as_unsigned()
    // are bit-preserving reinterprets, so no value conversion occurs.
    unsafe { svsubhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract narrow high part (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subhnt))]
pub fn svsubhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t {
    // Splat the scalar `op2` across a vector and defer to the vector form.
    svsubhnt_u64(even, op1, svdup_n_u64(op2))
}
#[doc = "Subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublb))]
pub fn svsublb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // widened-result (nxv8i16) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv8i16")]
        fn _svsublb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsublb_s16(op1, op2) }
}
#[doc = "Subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublb))]
pub fn svsublb_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsublb_s16(op1, svdup_n_s8(op2))
}
#[doc = "Subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublb))]
pub fn svsublb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // widened-result (nxv4i32) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv4i32")]
        fn _svsublb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsublb_s32(op1, op2) }
}
#[doc = "Subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublb))]
pub fn svsublb_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsublb_s32(op1, svdup_n_s16(op2))
}
#[doc = "Subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublb))]
pub fn svsublb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // widened-result (nxv2i64) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv2i64")]
        fn _svsublb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsublb_s64(op1, op2) }
}
#[doc = "Subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublb))]
pub fn svsublb_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsublb_s64(op1, svdup_n_s32(op2))
}
#[doc = "Subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usublb))]
pub fn svsublb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
    // The LLVM binding is declared with signed vector types; the public
    // unsigned signature is bridged with bit-preserving reinterprets.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv8i16")]
        fn _svsublb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: reinterprets are bit-for-bit; sve/sve2 availability is
    // enforced by #[target_feature] above.
    unsafe { _svsublb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usublb))]
pub fn svsublb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsublb_u16(op1, svdup_n_u8(op2))
}
#[doc = "Subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usublb))]
pub fn svsublb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
    // The LLVM binding is declared with signed vector types; the public
    // unsigned signature is bridged with bit-preserving reinterprets.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv4i32")]
        fn _svsublb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: reinterprets are bit-for-bit; sve/sve2 availability is
    // enforced by #[target_feature] above.
    unsafe { _svsublb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usublb))]
pub fn svsublb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsublb_u32(op1, svdup_n_u16(op2))
}
#[doc = "Subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usublb))]
pub fn svsublb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
    // The LLVM binding is declared with signed vector types; the public
    // unsigned signature is bridged with bit-preserving reinterprets.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv2i64")]
        fn _svsublb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: reinterprets are bit-for-bit; sve/sve2 availability is
    // enforced by #[target_feature] above.
    unsafe { _svsublb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract long (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usublb))]
pub fn svsublb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsublb_u64(op1, svdup_n_u32(op2))
}
#[doc = "Subtract long (bottom - top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublbt))]
pub fn svsublbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // widened-result (nxv8i16) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ssublbt.nxv8i16"
        )]
        fn _svsublbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsublbt_s16(op1, op2) }
}
#[doc = "Subtract long (bottom - top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublbt))]
pub fn svsublbt_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsublbt_s16(op1, svdup_n_s8(op2))
}
#[doc = "Subtract long (bottom - top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublbt))]
pub fn svsublbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // widened-result (nxv4i32) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ssublbt.nxv4i32"
        )]
        fn _svsublbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsublbt_s32(op1, op2) }
}
#[doc = "Subtract long (bottom - top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublbt))]
pub fn svsublbt_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsublbt_s32(op1, svdup_n_s16(op2))
}
#[doc = "Subtract long (bottom - top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublbt))]
pub fn svsublbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // widened-result (nxv2i64) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ssublbt.nxv2i64"
        )]
        fn _svsublbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsublbt_s64(op1, op2) }
}
#[doc = "Subtract long (bottom - top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublbt))]
pub fn svsublbt_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsublbt_s64(op1, svdup_n_s32(op2))
}
#[doc = "Subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublt))]
pub fn svsublt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // widened-result (nxv8i16) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv8i16")]
        fn _svsublt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsublt_s16(op1, op2) }
}
#[doc = "Subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublt))]
pub fn svsublt_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsublt_s16(op1, svdup_n_s8(op2))
}
#[doc = "Subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublt))]
pub fn svsublt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // widened-result (nxv4i32) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv4i32")]
        fn _svsublt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsublt_s32(op1, op2) }
}
#[doc = "Subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublt))]
pub fn svsublt_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsublt_s32(op1, svdup_n_s16(op2))
}
#[doc = "Subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublt))]
pub fn svsublt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // widened-result (nxv2i64) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv2i64")]
        fn _svsublt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsublt_s64(op1, op2) }
}
#[doc = "Subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssublt))]
pub fn svsublt_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsublt_s64(op1, svdup_n_s32(op2))
}
#[doc = "Subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usublt))]
pub fn svsublt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
    // The LLVM binding is declared with signed vector types; the public
    // unsigned signature is bridged with bit-preserving reinterprets.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv8i16")]
        fn _svsublt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: reinterprets are bit-for-bit; sve/sve2 availability is
    // enforced by #[target_feature] above.
    unsafe { _svsublt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usublt))]
pub fn svsublt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsublt_u16(op1, svdup_n_u8(op2))
}
#[doc = "Subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usublt))]
pub fn svsublt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
    // The LLVM binding is declared with signed vector types; the public
    // unsigned signature is bridged with bit-preserving reinterprets.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv4i32")]
        fn _svsublt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: reinterprets are bit-for-bit; sve/sve2 availability is
    // enforced by #[target_feature] above.
    unsafe { _svsublt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usublt))]
pub fn svsublt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsublt_u32(op1, svdup_n_u16(op2))
}
#[doc = "Subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usublt))]
pub fn svsublt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
    // The LLVM binding is declared with signed vector types; the public
    // unsigned signature is bridged with bit-preserving reinterprets.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv2i64")]
        fn _svsublt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: reinterprets are bit-for-bit; sve/sve2 availability is
    // enforced by #[target_feature] above.
    unsafe { _svsublt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract long (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usublt))]
pub fn svsublt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsublt_u64(op1, svdup_n_u32(op2))
}
#[doc = "Subtract long (top - bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubltb))]
pub fn svsubltb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // widened-result (nxv8i16) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ssubltb.nxv8i16"
        )]
        fn _svsubltb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsubltb_s16(op1, op2) }
}
#[doc = "Subtract long (top - bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubltb))]
pub fn svsubltb_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsubltb_s16(op1, svdup_n_s8(op2))
}
#[doc = "Subtract long (top - bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubltb))]
pub fn svsubltb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // widened-result (nxv4i32) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ssubltb.nxv4i32"
        )]
        fn _svsubltb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsubltb_s32(op1, op2) }
}
#[doc = "Subtract long (top - bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubltb))]
pub fn svsubltb_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsubltb_s32(op1, svdup_n_s16(op2))
}
#[doc = "Subtract long (top - bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubltb))]
pub fn svsubltb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // widened-result (nxv2i64) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ssubltb.nxv2i64"
        )]
        fn _svsubltb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsubltb_s64(op1, op2) }
}
#[doc = "Subtract long (top - bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubltb))]
pub fn svsubltb_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsubltb_s64(op1, svdup_n_s32(op2))
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwb))]
pub fn svsubwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // wide-operand (nxv8i16) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv8i16")]
        fn _svsubwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsubwb_s16(op1, op2) }
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwb))]
pub fn svsubwb_n_s16(op1: svint16_t, op2: i8) -> svint16_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsubwb_s16(op1, svdup_n_s8(op2))
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwb))]
pub fn svsubwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // wide-operand (nxv4i32) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv4i32")]
        fn _svsubwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsubwb_s32(op1, op2) }
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwb))]
pub fn svsubwb_n_s32(op1: svint32_t, op2: i16) -> svint32_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsubwb_s32(op1, svdup_n_s16(op2))
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwb))]
pub fn svsubwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // wide-operand (nxv2i64) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv2i64")]
        fn _svsubwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsubwb_s64(op1, op2) }
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwb))]
pub fn svsubwb_n_s64(op1: svint64_t, op2: i32) -> svint64_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsubwb_s64(op1, svdup_n_s32(op2))
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwb))]
pub fn svsubwb_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t {
    // The LLVM binding is declared with signed vector types; the public
    // unsigned signature is bridged with bit-preserving reinterprets.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv8i16")]
        fn _svsubwb_u16(op1: svint16_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: reinterprets are bit-for-bit; sve/sve2 availability is
    // enforced by #[target_feature] above.
    unsafe { _svsubwb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwb))]
pub fn svsubwb_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsubwb_u16(op1, svdup_n_u8(op2))
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwb))]
pub fn svsubwb_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t {
    // The LLVM binding is declared with signed vector types; the public
    // unsigned signature is bridged with bit-preserving reinterprets.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv4i32")]
        fn _svsubwb_u32(op1: svint32_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: reinterprets are bit-for-bit; sve/sve2 availability is
    // enforced by #[target_feature] above.
    unsafe { _svsubwb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwb))]
pub fn svsubwb_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsubwb_u32(op1, svdup_n_u16(op2))
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwb))]
pub fn svsubwb_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t {
    // The LLVM binding is declared with signed vector types; the public
    // unsigned signature is bridged with bit-preserving reinterprets.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv2i64")]
        fn _svsubwb_u64(op1: svint64_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: reinterprets are bit-for-bit; sve/sve2 availability is
    // enforced by #[target_feature] above.
    unsafe { _svsubwb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract wide (bottom)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwb))]
pub fn svsubwb_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t {
    // Splat the narrow scalar `op2` and defer to the vector form.
    svsubwb_u64(op1, svdup_n_u32(op2))
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwt))]
pub fn svsubwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t {
    // Direct binding to the LLVM intrinsic; `link_name` selects the
    // wide-operand (nxv8i16) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv8i16")]
        fn _svsubwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: identical signatures; sve/sve2 availability is enforced by
    // #[target_feature] above.
    unsafe { _svsubwt_s16(op1, op2) }
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwt))]
pub fn svsubwt_n_s16(op1: svint16_t, op2: i8) -> svint16_t {
    // Splat the scalar operand into a vector and defer to the vector form.
    svsubwt_s16(op1, svdup_n_s8(op2))
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwt))]
pub fn svsubwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv4i32")]
        fn _svsubwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: the intrinsic needs only the SVE2 feature, which
    // `#[target_feature]` on this function guarantees.
    unsafe { _svsubwt_s32(op1, op2) }
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwt))]
pub fn svsubwt_n_s32(op1: svint32_t, op2: i16) -> svint32_t {
    // Splat the scalar operand into a vector and defer to the vector form.
    svsubwt_s32(op1, svdup_n_s16(op2))
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwt))]
pub fn svsubwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv2i64")]
        fn _svsubwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: the intrinsic needs only the SVE2 feature, which
    // `#[target_feature]` on this function guarantees.
    unsafe { _svsubwt_s64(op1, op2) }
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ssubwt))]
pub fn svsubwt_n_s64(op1: svint64_t, op2: i32) -> svint64_t {
    // Splat the scalar operand into a vector and defer to the vector form.
    svsubwt_s64(op1, svdup_n_s32(op2))
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwt))]
pub fn svsubwt_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv8i16")]
        fn _svsubwt_u16(op1: svint16_t, op2: svint8_t) -> svint16_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed`/`as_unsigned`
    // only reinterpret lane bit patterns for the signed-typed LLVM declaration.
    unsafe { _svsubwt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwt))]
pub fn svsubwt_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t {
    // Splat the scalar operand into a vector and defer to the vector form.
    svsubwt_u16(op1, svdup_n_u8(op2))
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwt))]
pub fn svsubwt_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv4i32")]
        fn _svsubwt_u32(op1: svint32_t, op2: svint16_t) -> svint32_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed`/`as_unsigned`
    // only reinterpret lane bit patterns for the signed-typed LLVM declaration.
    unsafe { _svsubwt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwt))]
pub fn svsubwt_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t {
    // Splat the scalar operand into a vector and defer to the vector form.
    svsubwt_u32(op1, svdup_n_u16(op2))
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwt))]
pub fn svsubwt_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv2i64")]
        fn _svsubwt_u64(op1: svint64_t, op2: svint32_t) -> svint64_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed`/`as_unsigned`
    // only reinterpret lane bit patterns for the signed-typed LLVM declaration.
    unsafe { _svsubwt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract wide (top)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usubwt))]
pub fn svsubwt_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t {
    // Splat the scalar operand into a vector and defer to the vector form.
    svsubwt_u64(op1, svdup_n_u32(op2))
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_f32(data: svfloat32x2_t, indices: svuint32_t) -> svfloat32_t {
    // The LLVM intrinsic takes the two table halves as separate arguments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv4f32")]
        fn _svtbl2_f32(data0: svfloat32_t, data1: svfloat32_t, indices: svint32_t) -> svfloat32_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed` only
    // reinterprets the index lanes' bit patterns.
    unsafe {
        _svtbl2_f32(
            svget2_f32::<0>(data),
            svget2_f32::<1>(data),
            indices.as_signed(),
        )
    }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_f64(data: svfloat64x2_t, indices: svuint64_t) -> svfloat64_t {
    // The LLVM intrinsic takes the two table halves as separate arguments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv2f64")]
        fn _svtbl2_f64(data0: svfloat64_t, data1: svfloat64_t, indices: svint64_t) -> svfloat64_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed` only
    // reinterprets the index lanes' bit patterns.
    unsafe {
        _svtbl2_f64(
            svget2_f64::<0>(data),
            svget2_f64::<1>(data),
            indices.as_signed(),
        )
    }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_s8(data: svint8x2_t, indices: svuint8_t) -> svint8_t {
    // The LLVM intrinsic takes the two table halves as separate arguments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv16i8")]
        fn _svtbl2_s8(data0: svint8_t, data1: svint8_t, indices: svint8_t) -> svint8_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed` only
    // reinterprets the index lanes' bit patterns.
    unsafe {
        _svtbl2_s8(
            svget2_s8::<0>(data),
            svget2_s8::<1>(data),
            indices.as_signed(),
        )
    }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_s16(data: svint16x2_t, indices: svuint16_t) -> svint16_t {
    // The LLVM intrinsic takes the two table halves as separate arguments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv8i16")]
        fn _svtbl2_s16(data0: svint16_t, data1: svint16_t, indices: svint16_t) -> svint16_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed` only
    // reinterprets the index lanes' bit patterns.
    unsafe {
        _svtbl2_s16(
            svget2_s16::<0>(data),
            svget2_s16::<1>(data),
            indices.as_signed(),
        )
    }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_s32(data: svint32x2_t, indices: svuint32_t) -> svint32_t {
    // The LLVM intrinsic takes the two table halves as separate arguments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv4i32")]
        fn _svtbl2_s32(data0: svint32_t, data1: svint32_t, indices: svint32_t) -> svint32_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed` only
    // reinterprets the index lanes' bit patterns.
    unsafe {
        _svtbl2_s32(
            svget2_s32::<0>(data),
            svget2_s32::<1>(data),
            indices.as_signed(),
        )
    }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_s64(data: svint64x2_t, indices: svuint64_t) -> svint64_t {
    // The LLVM intrinsic takes the two table halves as separate arguments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv2i64")]
        fn _svtbl2_s64(data0: svint64_t, data1: svint64_t, indices: svint64_t) -> svint64_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed` only
    // reinterprets the index lanes' bit patterns.
    unsafe {
        _svtbl2_s64(
            svget2_s64::<0>(data),
            svget2_s64::<1>(data),
            indices.as_signed(),
        )
    }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_u8(data: svuint8x2_t, indices: svuint8_t) -> svuint8_t {
    // SAFETY: purely a lane-type reinterpretation around the signed implementation.
    unsafe { svtbl2_s8(data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_u16(data: svuint16x2_t, indices: svuint16_t) -> svuint16_t {
    // SAFETY: purely a lane-type reinterpretation around the signed implementation.
    unsafe { svtbl2_s16(data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_u32(data: svuint32x2_t, indices: svuint32_t) -> svuint32_t {
    // SAFETY: purely a lane-type reinterpretation around the signed implementation.
    unsafe { svtbl2_s32(data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in two-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl2_u64(data: svuint64x2_t, indices: svuint64_t) -> svuint64_t {
    // SAFETY: purely a lane-type reinterpretation around the signed implementation.
    unsafe { svtbl2_s64(data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_f32(fallback: svfloat32_t, data: svfloat32_t, indices: svuint32_t) -> svfloat32_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv4f32")]
        fn _svtbx_f32(fallback: svfloat32_t, data: svfloat32_t, indices: svint32_t) -> svfloat32_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed` only
    // reinterprets the index lanes' bit patterns.
    unsafe { _svtbx_f32(fallback, data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_f64(fallback: svfloat64_t, data: svfloat64_t, indices: svuint64_t) -> svfloat64_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv2f64")]
        fn _svtbx_f64(fallback: svfloat64_t, data: svfloat64_t, indices: svint64_t) -> svfloat64_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed` only
    // reinterprets the index lanes' bit patterns.
    unsafe { _svtbx_f64(fallback, data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_s8(fallback: svint8_t, data: svint8_t, indices: svuint8_t) -> svint8_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv16i8")]
        fn _svtbx_s8(fallback: svint8_t, data: svint8_t, indices: svint8_t) -> svint8_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed` only
    // reinterprets the index lanes' bit patterns.
    unsafe { _svtbx_s8(fallback, data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_s16(fallback: svint16_t, data: svint16_t, indices: svuint16_t) -> svint16_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv8i16")]
        fn _svtbx_s16(fallback: svint16_t, data: svint16_t, indices: svint16_t) -> svint16_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed` only
    // reinterprets the index lanes' bit patterns.
    unsafe { _svtbx_s16(fallback, data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_s32(fallback: svint32_t, data: svint32_t, indices: svuint32_t) -> svint32_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv4i32")]
        fn _svtbx_s32(fallback: svint32_t, data: svint32_t, indices: svint32_t) -> svint32_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed` only
    // reinterprets the index lanes' bit patterns.
    unsafe { _svtbx_s32(fallback, data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_s64(fallback: svint64_t, data: svint64_t, indices: svuint64_t) -> svint64_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv2i64")]
        fn _svtbx_s64(fallback: svint64_t, data: svint64_t, indices: svint64_t) -> svint64_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed` only
    // reinterprets the index lanes' bit patterns.
    unsafe { _svtbx_s64(fallback, data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_u8(fallback: svuint8_t, data: svuint8_t, indices: svuint8_t) -> svuint8_t {
    // SAFETY: purely a lane-type reinterpretation around the signed implementation.
    unsafe { svtbx_s8(fallback.as_signed(), data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_u16(fallback: svuint16_t, data: svuint16_t, indices: svuint16_t) -> svuint16_t {
    // SAFETY: purely a lane-type reinterpretation around the signed implementation.
    unsafe { svtbx_s16(fallback.as_signed(), data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_u32(fallback: svuint32_t, data: svuint32_t, indices: svuint32_t) -> svuint32_t {
    // SAFETY: purely a lane-type reinterpretation around the signed implementation.
    unsafe { svtbx_s32(fallback.as_signed(), data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in single-vector table (merging)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbx))]
pub fn svtbx_u64(fallback: svuint64_t, data: svuint64_t, indices: svuint64_t) -> svuint64_t {
    // SAFETY: purely a lane-type reinterpretation around the signed implementation.
    unsafe { svtbx_s64(fallback.as_signed(), data.as_signed(), indices).as_unsigned() }
}
#[doc = "Unpack and extend high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_b])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(punpkhi))]
pub fn svunpkhi_b(op: svbool_t) -> svbool_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.punpkhi.nxv16i1"
        )]
        fn _svunpkhi_b(op: svbool_t) -> svbool8_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `sve_into` converts
    // the intrinsic's `svbool8_t` result back to the generic predicate type.
    unsafe { _svunpkhi_b(op).sve_into() }
}
#[doc = "Unpack and extend high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sunpkhi))]
pub fn svunpkhi_s16(op: svint8_t) -> svint16_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sunpkhi.nxv8i16"
        )]
        fn _svunpkhi_s16(op: svint8_t) -> svint16_t;
    }
    // SAFETY: the intrinsic needs only the SVE2 feature, which
    // `#[target_feature]` on this function guarantees.
    unsafe { _svunpkhi_s16(op) }
}
#[doc = "Unpack and extend high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sunpkhi))]
pub fn svunpkhi_s32(op: svint16_t) -> svint32_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sunpkhi.nxv4i32"
        )]
        fn _svunpkhi_s32(op: svint16_t) -> svint32_t;
    }
    // SAFETY: the intrinsic needs only the SVE2 feature, which
    // `#[target_feature]` on this function guarantees.
    unsafe { _svunpkhi_s32(op) }
}
#[doc = "Unpack and extend high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sunpkhi))]
pub fn svunpkhi_s64(op: svint32_t) -> svint64_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sunpkhi.nxv2i64"
        )]
        fn _svunpkhi_s64(op: svint32_t) -> svint64_t;
    }
    // SAFETY: the intrinsic needs only the SVE2 feature, which
    // `#[target_feature]` on this function guarantees.
    unsafe { _svunpkhi_s64(op) }
}
#[doc = "Unpack and extend high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uunpkhi))]
pub fn svunpkhi_u16(op: svuint8_t) -> svuint16_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uunpkhi.nxv8i16"
        )]
        fn _svunpkhi_u16(op: svint8_t) -> svint16_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed`/`as_unsigned`
    // only reinterpret lane bit patterns for the signed-typed LLVM declaration.
    unsafe { _svunpkhi_u16(op.as_signed()).as_unsigned() }
}
#[doc = "Unpack and extend high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uunpkhi))]
pub fn svunpkhi_u32(op: svuint16_t) -> svuint32_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uunpkhi.nxv4i32"
        )]
        fn _svunpkhi_u32(op: svint16_t) -> svint32_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed`/`as_unsigned`
    // only reinterpret lane bit patterns for the signed-typed LLVM declaration.
    unsafe { _svunpkhi_u32(op.as_signed()).as_unsigned() }
}
#[doc = "Unpack and extend high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uunpkhi))]
pub fn svunpkhi_u64(op: svuint32_t) -> svuint64_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uunpkhi.nxv2i64"
        )]
        fn _svunpkhi_u64(op: svint32_t) -> svint64_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed`/`as_unsigned`
    // only reinterpret lane bit patterns for the signed-typed LLVM declaration.
    unsafe { _svunpkhi_u64(op.as_signed()).as_unsigned() }
}
#[doc = "Unpack and extend low half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_b])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(punpklo))]
pub fn svunpklo_b(op: svbool_t) -> svbool_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.punpklo.nxv16i1"
        )]
        fn _svunpklo_b(op: svbool_t) -> svbool8_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `sve_into` converts
    // the intrinsic's `svbool8_t` result back to the generic predicate type.
    unsafe { _svunpklo_b(op).sve_into() }
}
#[doc = "Unpack and extend low half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sunpklo))]
pub fn svunpklo_s16(op: svint8_t) -> svint16_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sunpklo.nxv8i16"
        )]
        fn _svunpklo_s16(op: svint8_t) -> svint16_t;
    }
    // SAFETY: the intrinsic needs only the SVE2 feature, which
    // `#[target_feature]` on this function guarantees.
    unsafe { _svunpklo_s16(op) }
}
#[doc = "Unpack and extend low half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sunpklo))]
pub fn svunpklo_s32(op: svint16_t) -> svint32_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sunpklo.nxv4i32"
        )]
        fn _svunpklo_s32(op: svint16_t) -> svint32_t;
    }
    // SAFETY: the intrinsic needs only the SVE2 feature, which
    // `#[target_feature]` on this function guarantees.
    unsafe { _svunpklo_s32(op) }
}
#[doc = "Unpack and extend low half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sunpklo))]
pub fn svunpklo_s64(op: svint32_t) -> svint64_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sunpklo.nxv2i64"
        )]
        fn _svunpklo_s64(op: svint32_t) -> svint64_t;
    }
    // SAFETY: the intrinsic needs only the SVE2 feature, which
    // `#[target_feature]` on this function guarantees.
    unsafe { _svunpklo_s64(op) }
}
#[doc = "Unpack and extend low half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uunpklo))]
pub fn svunpklo_u16(op: svuint8_t) -> svuint16_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uunpklo.nxv8i16"
        )]
        fn _svunpklo_u16(op: svint8_t) -> svint16_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed`/`as_unsigned`
    // only reinterpret lane bit patterns for the signed-typed LLVM declaration.
    unsafe { _svunpklo_u16(op.as_signed()).as_unsigned() }
}
#[doc = "Unpack and extend low half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uunpklo))]
pub fn svunpklo_u32(op: svuint16_t) -> svuint32_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uunpklo.nxv4i32"
        )]
        fn _svunpklo_u32(op: svint16_t) -> svint32_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed`/`as_unsigned`
    // only reinterpret lane bit patterns for the signed-typed LLVM declaration.
    unsafe { _svunpklo_u32(op.as_signed()).as_unsigned() }
}
#[doc = "Unpack and extend low half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uunpklo))]
pub fn svunpklo_u64(op: svuint32_t) -> svuint64_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uunpklo.nxv2i64"
        )]
        fn _svunpklo_u64(op: svint32_t) -> svint64_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed`/`as_unsigned`
    // only reinterpret lane bit patterns for the signed-typed LLVM declaration.
    unsafe { _svunpklo_u64(op.as_signed()).as_unsigned() }
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
    // `extern "unadjusted"` keeps the scalable-vector ABI unmodified for LLVM.
    // The byte-lane predicate `svbool_t` is passed through unchanged here.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv16i8")]
        fn _svuqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: SVE2 is guaranteed by `#[target_feature]`; `as_signed` only
    // reinterprets the unsigned addend's lane bit patterns.
    unsafe { _svuqadd_s8_m(pg, op1, op2.as_signed()) }
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
    // Splat the scalar addend into a vector and defer to the vector form.
    svuqadd_s8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
    // "Don't care" predication is implemented via the merging form.
    svuqadd_s8_m(pg, op1, op2)
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
    // Splat the scalar addend into a vector and defer to the vector form.
    svuqadd_s8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
    // Zeroing predication: inactive lanes of `op1` are first selected to 0,
    // then the merging form is applied.
    svuqadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
    // Splat the scalar addend into a vector and defer to the vector form.
    svuqadd_s8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
    // Merging (_m) form: binds directly to the LLVM SUQADD intrinsic for
    // 16-bit elements (nxv8i16).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv8i16")]
        fn _svuqadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // The intrinsic expects an element-width predicate (svbool8_t) and a
    // signed vector; `sve_into`/`as_signed` are bit-preserving reinterprets.
    unsafe { _svuqadd_s16_m(pg.sve_into(), op1, op2.as_signed()) }
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
    // Scalar-operand (_n) form: splat `op2` into a vector and defer to the
    // vector variant.
    svuqadd_s16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so the
    // merging (_m) form can be reused unchanged.
    svuqadd_s16_m(pg, op1, op2)
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
    // Scalar-operand (_n) form: splat `op2` into a vector and defer to the
    // vector variant.
    svuqadd_s16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
    // Zeroing (_z) form: force inactive lanes of `op1` to zero via svsel
    // first, then apply the merging (_m) form.
    svuqadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
    // Scalar-operand (_n) form: splat `op2` into a vector and defer to the
    // vector variant.
    svuqadd_s16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
    // Merging (_m) form: binds directly to the LLVM SUQADD intrinsic for
    // 32-bit elements (nxv4i32).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv4i32")]
        fn _svuqadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // The intrinsic expects an element-width predicate (svbool4_t) and a
    // signed vector; `sve_into`/`as_signed` are bit-preserving reinterprets.
    unsafe { _svuqadd_s32_m(pg.sve_into(), op1, op2.as_signed()) }
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
    // Scalar-operand (_n) form: splat `op2` into a vector and defer to the
    // vector variant.
    svuqadd_s32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so the
    // merging (_m) form can be reused unchanged.
    svuqadd_s32_m(pg, op1, op2)
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
    // Scalar-operand (_n) form: splat `op2` into a vector and defer to the
    // vector variant.
    svuqadd_s32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
    // Zeroing (_z) form: force inactive lanes of `op1` to zero via svsel
    // first, then apply the merging (_m) form.
    svuqadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
    // Scalar-operand (_n) form: splat `op2` into a vector and defer to the
    // vector variant.
    svuqadd_s32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
    // Merging (_m) form: binds directly to the LLVM SUQADD intrinsic for
    // 64-bit elements (nxv2i64).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv2i64")]
        fn _svuqadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // The intrinsic expects an element-width predicate (svbool2_t) and a
    // signed vector; `sve_into`/`as_signed` are bit-preserving reinterprets.
    unsafe { _svuqadd_s64_m(pg.sve_into(), op1, op2.as_signed()) }
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
    // Scalar-operand (_n) form: splat `op2` into a vector and defer to the
    // vector variant.
    svuqadd_s64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so the
    // merging (_m) form can be reused unchanged.
    svuqadd_s64_m(pg, op1, op2)
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
    // Scalar-operand (_n) form: splat `op2` into a vector and defer to the
    // vector variant.
    svuqadd_s64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
    // Zeroing (_z) form: force inactive lanes of `op1` to zero via svsel
    // first, then apply the merging (_m) form.
    svuqadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Saturating add with unsigned addend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn svuqadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
    // Scalar-operand (_n) form: splat `op2` into a vector and defer to the
    // vector variant.
    svuqadd_s64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilege))]
pub fn svwhilege_b8_s32(op1: i32, op2: i32) -> svbool_t {
    // b8: the intrinsic yields the full 16-lane predicate (nxv16i1), which is
    // already `svbool_t`, so no predicate conversion is needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilege.nxv16i1.i32"
        )]
        fn _svwhilege_b8_s32(op1: i32, op2: i32) -> svbool_t;
    }
    unsafe { _svwhilege_b8_s32(op1, op2) }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilege))]
pub fn svwhilege_b16_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilege.nxv8i1.i32"
        )]
        fn _svwhilege_b16_s32(op1: i32, op2: i32) -> svbool8_t;
    }
    // The intrinsic returns a 16-bit-element predicate (nxv8i1); `sve_into`
    // converts it to the generic `svbool_t`.
    unsafe { _svwhilege_b16_s32(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilege))]
pub fn svwhilege_b32_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilege.nxv4i1.i32"
        )]
        fn _svwhilege_b32_s32(op1: i32, op2: i32) -> svbool4_t;
    }
    // The intrinsic returns a 32-bit-element predicate (nxv4i1); `sve_into`
    // converts it to the generic `svbool_t`.
    unsafe { _svwhilege_b32_s32(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilege))]
pub fn svwhilege_b64_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilege.nxv2i1.i32"
        )]
        fn _svwhilege_b64_s32(op1: i32, op2: i32) -> svbool2_t;
    }
    // The intrinsic returns a 64-bit-element predicate (nxv2i1); `sve_into`
    // converts it to the generic `svbool_t`.
    unsafe { _svwhilege_b64_s32(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilege))]
pub fn svwhilege_b8_s64(op1: i64, op2: i64) -> svbool_t {
    // b8: the intrinsic yields the full 16-lane predicate (nxv16i1), which is
    // already `svbool_t`, so no predicate conversion is needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilege.nxv16i1.i64"
        )]
        fn _svwhilege_b8_s64(op1: i64, op2: i64) -> svbool_t;
    }
    unsafe { _svwhilege_b8_s64(op1, op2) }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilege))]
pub fn svwhilege_b16_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilege.nxv8i1.i64"
        )]
        fn _svwhilege_b16_s64(op1: i64, op2: i64) -> svbool8_t;
    }
    // The intrinsic returns a 16-bit-element predicate (nxv8i1); `sve_into`
    // converts it to the generic `svbool_t`.
    unsafe { _svwhilege_b16_s64(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilege))]
pub fn svwhilege_b32_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilege.nxv4i1.i64"
        )]
        fn _svwhilege_b32_s64(op1: i64, op2: i64) -> svbool4_t;
    }
    // The intrinsic returns a 32-bit-element predicate (nxv4i1); `sve_into`
    // converts it to the generic `svbool_t`.
    unsafe { _svwhilege_b32_s64(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilege))]
pub fn svwhilege_b64_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilege.nxv2i1.i64"
        )]
        fn _svwhilege_b64_s64(op1: i64, op2: i64) -> svbool2_t;
    }
    // The intrinsic returns a 64-bit-element predicate (nxv2i1); `sve_into`
    // converts it to the generic `svbool_t`.
    unsafe { _svwhilege_b64_s64(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehs))]
pub fn svwhilege_b8_u32(op1: u32, op2: u32) -> svbool_t {
    // Unsigned comparison maps to the WHILEHS instruction (whilehs intrinsic).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilehs.nxv16i1.i32"
        )]
        fn _svwhilege_b8_u32(op1: i32, op2: i32) -> svbool_t;
    }
    // `as_signed` only reinterprets the bits to match the intrinsic's i32
    // signature; the instruction itself compares as unsigned.
    unsafe { _svwhilege_b8_u32(op1.as_signed(), op2.as_signed()) }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehs))]
pub fn svwhilege_b16_u32(op1: u32, op2: u32) -> svbool_t {
    // Unsigned comparison maps to the WHILEHS instruction (whilehs intrinsic).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilehs.nxv8i1.i32"
        )]
        fn _svwhilege_b16_u32(op1: i32, op2: i32) -> svbool8_t;
    }
    // `as_signed` is a bit-reinterpret for the FFI signature; `sve_into`
    // converts the nxv8i1 predicate to the generic `svbool_t`.
    unsafe { _svwhilege_b16_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehs))]
pub fn svwhilege_b32_u32(op1: u32, op2: u32) -> svbool_t {
    // Unsigned comparison maps to the WHILEHS instruction (whilehs intrinsic).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilehs.nxv4i1.i32"
        )]
        fn _svwhilege_b32_u32(op1: i32, op2: i32) -> svbool4_t;
    }
    // `as_signed` is a bit-reinterpret for the FFI signature; `sve_into`
    // converts the nxv4i1 predicate to the generic `svbool_t`.
    unsafe { _svwhilege_b32_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehs))]
pub fn svwhilege_b64_u32(op1: u32, op2: u32) -> svbool_t {
    // Unsigned comparison maps to the WHILEHS instruction (whilehs intrinsic).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilehs.nxv2i1.i32"
        )]
        fn _svwhilege_b64_u32(op1: i32, op2: i32) -> svbool2_t;
    }
    // `as_signed` is a bit-reinterpret for the FFI signature; `sve_into`
    // converts the nxv2i1 predicate to the generic `svbool_t`.
    unsafe { _svwhilege_b64_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehs))]
pub fn svwhilege_b8_u64(op1: u64, op2: u64) -> svbool_t {
    // Unsigned comparison maps to the WHILEHS instruction (whilehs intrinsic).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilehs.nxv16i1.i64"
        )]
        fn _svwhilege_b8_u64(op1: i64, op2: i64) -> svbool_t;
    }
    // `as_signed` only reinterprets the bits to match the intrinsic's i64
    // signature; the instruction itself compares as unsigned.
    unsafe { _svwhilege_b8_u64(op1.as_signed(), op2.as_signed()) }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehs))]
pub fn svwhilege_b16_u64(op1: u64, op2: u64) -> svbool_t {
    // Unsigned comparison maps to the WHILEHS instruction (whilehs intrinsic).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilehs.nxv8i1.i64"
        )]
        fn _svwhilege_b16_u64(op1: i64, op2: i64) -> svbool8_t;
    }
    // `as_signed` is a bit-reinterpret for the FFI signature; `sve_into`
    // converts the nxv8i1 predicate to the generic `svbool_t`.
    unsafe { _svwhilege_b16_u64(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehs))]
pub fn svwhilege_b32_u64(op1: u64, op2: u64) -> svbool_t {
    // Unsigned comparison maps to the WHILEHS instruction (whilehs intrinsic).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilehs.nxv4i1.i64"
        )]
        fn _svwhilege_b32_u64(op1: i64, op2: i64) -> svbool4_t;
    }
    // `as_signed` is a bit-reinterpret for the FFI signature; `sve_into`
    // converts the nxv4i1 predicate to the generic `svbool_t`.
    unsafe { _svwhilege_b32_u64(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehs))]
pub fn svwhilege_b64_u64(op1: u64, op2: u64) -> svbool_t {
    // Unsigned comparison maps to the WHILEHS instruction (whilehs intrinsic).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilehs.nxv2i1.i64"
        )]
        fn _svwhilege_b64_u64(op1: i64, op2: i64) -> svbool2_t;
    }
    // `as_signed` is a bit-reinterpret for the FFI signature; `sve_into`
    // converts the nxv2i1 predicate to the generic `svbool_t`.
    unsafe { _svwhilege_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilegt))]
pub fn svwhilegt_b8_s32(op1: i32, op2: i32) -> svbool_t {
    // b8: the intrinsic yields the full 16-lane predicate (nxv16i1), which is
    // already `svbool_t`, so no predicate conversion is needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilegt.nxv16i1.i32"
        )]
        fn _svwhilegt_b8_s32(op1: i32, op2: i32) -> svbool_t;
    }
    unsafe { _svwhilegt_b8_s32(op1, op2) }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilegt))]
pub fn svwhilegt_b16_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilegt.nxv8i1.i32"
        )]
        fn _svwhilegt_b16_s32(op1: i32, op2: i32) -> svbool8_t;
    }
    // The intrinsic returns a 16-bit-element predicate (nxv8i1); `sve_into`
    // converts it to the generic `svbool_t`.
    unsafe { _svwhilegt_b16_s32(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilegt))]
pub fn svwhilegt_b32_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilegt.nxv4i1.i32"
        )]
        fn _svwhilegt_b32_s32(op1: i32, op2: i32) -> svbool4_t;
    }
    // The intrinsic returns a 32-bit-element predicate (nxv4i1); `sve_into`
    // converts it to the generic `svbool_t`.
    unsafe { _svwhilegt_b32_s32(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilegt))]
pub fn svwhilegt_b64_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilegt.nxv2i1.i32"
        )]
        fn _svwhilegt_b64_s32(op1: i32, op2: i32) -> svbool2_t;
    }
    // The intrinsic returns a 64-bit-element predicate (nxv2i1); `sve_into`
    // converts it to the generic `svbool_t`.
    unsafe { _svwhilegt_b64_s32(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilegt))]
pub fn svwhilegt_b8_s64(op1: i64, op2: i64) -> svbool_t {
    // b8: the intrinsic yields the full 16-lane predicate (nxv16i1), which is
    // already `svbool_t`, so no predicate conversion is needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilegt.nxv16i1.i64"
        )]
        fn _svwhilegt_b8_s64(op1: i64, op2: i64) -> svbool_t;
    }
    unsafe { _svwhilegt_b8_s64(op1, op2) }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilegt))]
pub fn svwhilegt_b16_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilegt.nxv8i1.i64"
        )]
        fn _svwhilegt_b16_s64(op1: i64, op2: i64) -> svbool8_t;
    }
    // The intrinsic returns a 16-bit-element predicate (nxv8i1); `sve_into`
    // converts it to the generic `svbool_t`.
    unsafe { _svwhilegt_b16_s64(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilegt))]
pub fn svwhilegt_b32_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilegt.nxv4i1.i64"
        )]
        fn _svwhilegt_b32_s64(op1: i64, op2: i64) -> svbool4_t;
    }
    // The intrinsic returns a 32-bit-element predicate (nxv4i1); `sve_into`
    // converts it to the generic `svbool_t`.
    unsafe { _svwhilegt_b32_s64(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilegt))]
pub fn svwhilegt_b64_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilegt.nxv2i1.i64"
        )]
        fn _svwhilegt_b64_s64(op1: i64, op2: i64) -> svbool2_t;
    }
    // The intrinsic returns a 64-bit-element predicate (nxv2i1); `sve_into`
    // converts it to the generic `svbool_t`.
    unsafe { _svwhilegt_b64_s64(op1, op2).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehi))]
pub fn svwhilegt_b8_u32(op1: u32, op2: u32) -> svbool_t {
    // Unsigned comparison maps to the WHILEHI instruction (whilehi intrinsic).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilehi.nxv16i1.i32"
        )]
        fn _svwhilegt_b8_u32(op1: i32, op2: i32) -> svbool_t;
    }
    // `as_signed` only reinterprets the bits to match the intrinsic's i32
    // signature; the instruction itself compares as unsigned.
    unsafe { _svwhilegt_b8_u32(op1.as_signed(), op2.as_signed()) }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehi))]
pub fn svwhilegt_b16_u32(op1: u32, op2: u32) -> svbool_t {
    // Unsigned comparison maps to the WHILEHI instruction (whilehi intrinsic).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilehi.nxv8i1.i32"
        )]
        fn _svwhilegt_b16_u32(op1: i32, op2: i32) -> svbool8_t;
    }
    // `as_signed` is a bit-reinterpret for the FFI signature; `sve_into`
    // converts the nxv8i1 predicate to the generic `svbool_t`.
    unsafe { _svwhilegt_b16_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehi))]
pub fn svwhilegt_b32_u32(op1: u32, op2: u32) -> svbool_t {
    // Unsigned comparison maps to the WHILEHI instruction (whilehi intrinsic).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilehi.nxv4i1.i32"
        )]
        fn _svwhilegt_b32_u32(op1: i32, op2: i32) -> svbool4_t;
    }
    // `as_signed` is a bit-reinterpret for the FFI signature; `sve_into`
    // converts the nxv4i1 predicate to the generic `svbool_t`.
    unsafe { _svwhilegt_b32_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehi))]
pub fn svwhilegt_b64_u32(op1: u32, op2: u32) -> svbool_t {
    // Unsigned comparison maps to the WHILEHI instruction (whilehi intrinsic).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilehi.nxv2i1.i32"
        )]
        fn _svwhilegt_b64_u32(op1: i32, op2: i32) -> svbool2_t;
    }
    // `as_signed` is a bit-reinterpret for the FFI signature; `sve_into`
    // converts the nxv2i1 predicate to the generic `svbool_t`.
    unsafe { _svwhilegt_b64_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehi))]
pub fn svwhilegt_b8_u64(op1: u64, op2: u64) -> svbool_t {
    // Unsigned comparison maps to the WHILEHI instruction (whilehi intrinsic).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilehi.nxv16i1.i64"
        )]
        fn _svwhilegt_b8_u64(op1: i64, op2: i64) -> svbool_t;
    }
    // `as_signed` only reinterprets the bits to match the intrinsic's i64
    // signature; the instruction itself compares as unsigned.
    unsafe { _svwhilegt_b8_u64(op1.as_signed(), op2.as_signed()) }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehi))]
pub fn svwhilegt_b16_u64(op1: u64, op2: u64) -> svbool_t {
    // Unsigned comparison maps to the WHILEHI instruction (whilehi intrinsic).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilehi.nxv8i1.i64"
        )]
        fn _svwhilegt_b16_u64(op1: i64, op2: i64) -> svbool8_t;
    }
    // `as_signed` is a bit-reinterpret for the FFI signature; `sve_into`
    // converts the nxv8i1 predicate to the generic `svbool_t`.
    unsafe { _svwhilegt_b16_u64(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While decrementing scalar is greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilehi))]
pub fn svwhilegt_b32_u64(op1: u64, op2: u64) -> svbool_t {
    // Unsigned comparison maps to the WHILEHI instruction (whilehi intrinsic).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilehi.nxv4i1.i64"
        )]
        fn _svwhilegt_b32_u64(op1: i64, op2: i64) -> svbool4_t;
    }
    // `as_signed` is a bit-reinterpret for the FFI signature; `sve_into`
    // converts the nxv4i1 predicate to the generic `svbool_t`.
    unsafe { _svwhilegt_b32_u64(op1.as_signed(), op2.as_signed()).sve_into() }
}
23406#[doc = "While decrementing scalar is greater than"]
23407#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_u64])"]
23408#[inline(always)]
23409#[target_feature(enable = "sve,sve2")]
23410#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23411#[cfg_attr(test, assert_instr(whilehi))]
23412pub fn svwhilegt_b64_u64(op1: u64, op2: u64) -> svbool_t {
23413 unsafe extern "unadjusted" {
23414 #[cfg_attr(
23415 target_arch = "aarch64",
23416 link_name = "llvm.aarch64.sve.whilehi.nxv2i1.i64"
23417 )]
23418 fn _svwhilegt_b64_u64(op1: i64, op2: i64) -> svbool2_t;
23419 }
23420 unsafe { _svwhilegt_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() }
23421}
// Type-erased core shared by all 8-bit-element `svwhilerw_*` intrinsics: the
// typed pointers are cast to `*const c_void` before calling the LLVM
// intrinsic, since only the element width (not the element type) matters.
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
unsafe fn svwhilerw_8ptr<T>(op1: *const T, op2: *const T) -> svbool_t {
    let op1 = op1 as *const crate::ffi::c_void;
    let op2 = op2 as *const crate::ffi::c_void;
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilerw.b.nxv16i1.p0"
        )]
        fn _svwhilerw_8ptr(
            op1: *const crate::ffi::c_void,
            op2: *const crate::ffi::c_void,
        ) -> svbool_t;
    }
    // Caller upholds the pointer constraints documented on the public
    // `svwhilerw_*` wrappers (`pointer::byte_offset_from` requirements).
    _svwhilerw_8ptr(op1, op2)
}
// Type-erased core shared by all 16-bit-element `svwhilerw_*` intrinsics;
// `sve_into` converts the typed predicate (`svbool8_t`) into `svbool_t`.
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
unsafe fn svwhilerw_16ptr<T>(op1: *const T, op2: *const T) -> svbool_t {
    let op1 = op1 as *const crate::ffi::c_void;
    let op2 = op2 as *const crate::ffi::c_void;
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilerw.h.nxv8i1.p0"
        )]
        fn _svwhilerw_16ptr(
            op1: *const crate::ffi::c_void,
            op2: *const crate::ffi::c_void,
        ) -> svbool8_t;
    }
    // Caller upholds the pointer constraints documented on the public
    // `svwhilerw_*` wrappers (`pointer::byte_offset_from` requirements).
    _svwhilerw_16ptr(op1, op2).sve_into()
}
// Type-erased core shared by all 32-bit-element `svwhilerw_*` intrinsics;
// `sve_into` converts the typed predicate (`svbool4_t`) into `svbool_t`.
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
unsafe fn svwhilerw_32ptr<T>(op1: *const T, op2: *const T) -> svbool_t {
    let op1 = op1 as *const crate::ffi::c_void;
    let op2 = op2 as *const crate::ffi::c_void;
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilerw.s.nxv4i1.p0"
        )]
        fn _svwhilerw_32ptr(
            op1: *const crate::ffi::c_void,
            op2: *const crate::ffi::c_void,
        ) -> svbool4_t;
    }
    // Caller upholds the pointer constraints documented on the public
    // `svwhilerw_*` wrappers (`pointer::byte_offset_from` requirements).
    _svwhilerw_32ptr(op1, op2).sve_into()
}
// Type-erased core shared by all 64-bit-element `svwhilerw_*` intrinsics;
// `sve_into` converts the typed predicate (`svbool2_t`) into `svbool_t`.
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
unsafe fn svwhilerw_64ptr<T>(op1: *const T, op2: *const T) -> svbool_t {
    let op1 = op1 as *const crate::ffi::c_void;
    let op2 = op2 as *const crate::ffi::c_void;
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilerw.d.nxv2i1.p0"
        )]
        fn _svwhilerw_64ptr(
            op1: *const crate::ffi::c_void,
            op2: *const crate::ffi::c_void,
        ) -> svbool2_t;
    }
    // Caller upholds the pointer constraints documented on the public
    // `svwhilerw_*` wrappers (`pointer::byte_offset_from` requirements).
    _svwhilerw_64ptr(op1, op2).sve_into()
}
23494#[doc = "While free of read-after-write conflicts"]
23495#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_f32])"]
23496#[doc = "## Safety"]
23497#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23498#[inline(always)]
23499#[target_feature(enable = "sve,sve2")]
23500#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23501#[cfg_attr(test, assert_instr(whilerw))]
23502pub unsafe fn svwhilerw_f32(op1: *const f32, op2: *const f32) -> svbool_t {
23503 svwhilerw_32ptr::<f32>(op1, op2)
23504}
23505#[doc = "While free of read-after-write conflicts"]
23506#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_f64])"]
23507#[doc = "## Safety"]
23508#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23509#[inline(always)]
23510#[target_feature(enable = "sve,sve2")]
23511#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23512#[cfg_attr(test, assert_instr(whilerw))]
23513pub unsafe fn svwhilerw_f64(op1: *const f64, op2: *const f64) -> svbool_t {
23514 svwhilerw_64ptr::<f64>(op1, op2)
23515}
23516#[doc = "While free of read-after-write conflicts"]
23517#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s8])"]
23518#[doc = "## Safety"]
23519#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23520#[inline(always)]
23521#[target_feature(enable = "sve,sve2")]
23522#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23523#[cfg_attr(test, assert_instr(whilerw))]
23524pub unsafe fn svwhilerw_s8(op1: *const i8, op2: *const i8) -> svbool_t {
23525 svwhilerw_8ptr::<i8>(op1, op2)
23526}
23527#[doc = "While free of read-after-write conflicts"]
23528#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s16])"]
23529#[doc = "## Safety"]
23530#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23531#[inline(always)]
23532#[target_feature(enable = "sve,sve2")]
23533#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23534#[cfg_attr(test, assert_instr(whilerw))]
23535pub unsafe fn svwhilerw_s16(op1: *const i16, op2: *const i16) -> svbool_t {
23536 svwhilerw_16ptr::<i16>(op1, op2)
23537}
23538#[doc = "While free of read-after-write conflicts"]
23539#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s32])"]
23540#[doc = "## Safety"]
23541#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23542#[inline(always)]
23543#[target_feature(enable = "sve,sve2")]
23544#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23545#[cfg_attr(test, assert_instr(whilerw))]
23546pub unsafe fn svwhilerw_s32(op1: *const i32, op2: *const i32) -> svbool_t {
23547 svwhilerw_32ptr::<i32>(op1, op2)
23548}
23549#[doc = "While free of read-after-write conflicts"]
23550#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s64])"]
23551#[doc = "## Safety"]
23552#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23553#[inline(always)]
23554#[target_feature(enable = "sve,sve2")]
23555#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23556#[cfg_attr(test, assert_instr(whilerw))]
23557pub unsafe fn svwhilerw_s64(op1: *const i64, op2: *const i64) -> svbool_t {
23558 svwhilerw_64ptr::<i64>(op1, op2)
23559}
23560#[doc = "While free of read-after-write conflicts"]
23561#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u8])"]
23562#[doc = "## Safety"]
23563#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23564#[inline(always)]
23565#[target_feature(enable = "sve,sve2")]
23566#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23567#[cfg_attr(test, assert_instr(whilerw))]
23568pub unsafe fn svwhilerw_u8(op1: *const u8, op2: *const u8) -> svbool_t {
23569 svwhilerw_8ptr::<u8>(op1, op2)
23570}
23571#[doc = "While free of read-after-write conflicts"]
23572#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u16])"]
23573#[doc = "## Safety"]
23574#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23575#[inline(always)]
23576#[target_feature(enable = "sve,sve2")]
23577#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23578#[cfg_attr(test, assert_instr(whilerw))]
23579pub unsafe fn svwhilerw_u16(op1: *const u16, op2: *const u16) -> svbool_t {
23580 svwhilerw_16ptr::<u16>(op1, op2)
23581}
23582#[doc = "While free of read-after-write conflicts"]
23583#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u32])"]
23584#[doc = "## Safety"]
23585#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23586#[inline(always)]
23587#[target_feature(enable = "sve,sve2")]
23588#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23589#[cfg_attr(test, assert_instr(whilerw))]
23590pub unsafe fn svwhilerw_u32(op1: *const u32, op2: *const u32) -> svbool_t {
23591 svwhilerw_32ptr::<u32>(op1, op2)
23592}
23593#[doc = "While free of read-after-write conflicts"]
23594#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u64])"]
23595#[doc = "## Safety"]
23596#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23597#[inline(always)]
23598#[target_feature(enable = "sve,sve2")]
23599#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23600#[cfg_attr(test, assert_instr(whilerw))]
23601pub unsafe fn svwhilerw_u64(op1: *const u64, op2: *const u64) -> svbool_t {
23602 svwhilerw_64ptr::<u64>(op1, op2)
23603}
// Type-erased core shared by all 8-bit-element `svwhilewr_*` intrinsics: the
// typed pointers are cast to `*const c_void` before calling the LLVM
// intrinsic, since only the element width (not the element type) matters.
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
unsafe fn svwhilewr_8ptr<T>(op1: *const T, op2: *const T) -> svbool_t {
    let op1 = op1 as *const crate::ffi::c_void;
    let op2 = op2 as *const crate::ffi::c_void;
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilewr.b.nxv16i1.p0"
        )]
        fn _svwhilewr_8ptr(
            op1: *const crate::ffi::c_void,
            op2: *const crate::ffi::c_void,
        ) -> svbool_t;
    }
    // Caller upholds the pointer constraints documented on the public
    // `svwhilewr_*` wrappers (`pointer::byte_offset_from` requirements).
    _svwhilewr_8ptr(op1, op2)
}
// Type-erased core shared by all 16-bit-element `svwhilewr_*` intrinsics;
// `sve_into` converts the typed predicate (`svbool8_t`) into `svbool_t`.
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
unsafe fn svwhilewr_16ptr<T>(op1: *const T, op2: *const T) -> svbool_t {
    let op1 = op1 as *const crate::ffi::c_void;
    let op2 = op2 as *const crate::ffi::c_void;
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilewr.h.nxv8i1.p0"
        )]
        fn _svwhilewr_16ptr(
            op1: *const crate::ffi::c_void,
            op2: *const crate::ffi::c_void,
        ) -> svbool8_t;
    }
    // Caller upholds the pointer constraints documented on the public
    // `svwhilewr_*` wrappers (`pointer::byte_offset_from` requirements).
    _svwhilewr_16ptr(op1, op2).sve_into()
}
// Type-erased core shared by all 32-bit-element `svwhilewr_*` intrinsics;
// `sve_into` converts the typed predicate (`svbool4_t`) into `svbool_t`.
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
unsafe fn svwhilewr_32ptr<T>(op1: *const T, op2: *const T) -> svbool_t {
    let op1 = op1 as *const crate::ffi::c_void;
    let op2 = op2 as *const crate::ffi::c_void;
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilewr.s.nxv4i1.p0"
        )]
        fn _svwhilewr_32ptr(
            op1: *const crate::ffi::c_void,
            op2: *const crate::ffi::c_void,
        ) -> svbool4_t;
    }
    // Caller upholds the pointer constraints documented on the public
    // `svwhilewr_*` wrappers (`pointer::byte_offset_from` requirements).
    _svwhilewr_32ptr(op1, op2).sve_into()
}
// Type-erased core shared by all 64-bit-element `svwhilewr_*` intrinsics;
// `sve_into` converts the typed predicate (`svbool2_t`) into `svbool_t`.
#[inline(always)]
#[target_feature(enable = "sve,sve2")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
unsafe fn svwhilewr_64ptr<T>(op1: *const T, op2: *const T) -> svbool_t {
    let op1 = op1 as *const crate::ffi::c_void;
    let op2 = op2 as *const crate::ffi::c_void;
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilewr.d.nxv2i1.p0"
        )]
        fn _svwhilewr_64ptr(
            op1: *const crate::ffi::c_void,
            op2: *const crate::ffi::c_void,
        ) -> svbool2_t;
    }
    // Caller upholds the pointer constraints documented on the public
    // `svwhilewr_*` wrappers (`pointer::byte_offset_from` requirements).
    _svwhilewr_64ptr(op1, op2).sve_into()
}
23676#[doc = "While free of write-after-read conflicts"]
23677#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_f32])"]
23678#[doc = "## Safety"]
23679#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23680#[inline(always)]
23681#[target_feature(enable = "sve,sve2")]
23682#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23683#[cfg_attr(test, assert_instr(whilewr))]
23684pub unsafe fn svwhilewr_f32(op1: *const f32, op2: *const f32) -> svbool_t {
23685 svwhilewr_32ptr::<f32>(op1, op2)
23686}
23687#[doc = "While free of write-after-read conflicts"]
23688#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_f64])"]
23689#[doc = "## Safety"]
23690#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23691#[inline(always)]
23692#[target_feature(enable = "sve,sve2")]
23693#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23694#[cfg_attr(test, assert_instr(whilewr))]
23695pub unsafe fn svwhilewr_f64(op1: *const f64, op2: *const f64) -> svbool_t {
23696 svwhilewr_64ptr::<f64>(op1, op2)
23697}
23698#[doc = "While free of write-after-read conflicts"]
23699#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s8])"]
23700#[doc = "## Safety"]
23701#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23702#[inline(always)]
23703#[target_feature(enable = "sve,sve2")]
23704#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23705#[cfg_attr(test, assert_instr(whilewr))]
23706pub unsafe fn svwhilewr_s8(op1: *const i8, op2: *const i8) -> svbool_t {
23707 svwhilewr_8ptr::<i8>(op1, op2)
23708}
23709#[doc = "While free of write-after-read conflicts"]
23710#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s16])"]
23711#[doc = "## Safety"]
23712#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23713#[inline(always)]
23714#[target_feature(enable = "sve,sve2")]
23715#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23716#[cfg_attr(test, assert_instr(whilewr))]
23717pub unsafe fn svwhilewr_s16(op1: *const i16, op2: *const i16) -> svbool_t {
23718 svwhilewr_16ptr::<i16>(op1, op2)
23719}
23720#[doc = "While free of write-after-read conflicts"]
23721#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s32])"]
23722#[doc = "## Safety"]
23723#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23724#[inline(always)]
23725#[target_feature(enable = "sve,sve2")]
23726#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23727#[cfg_attr(test, assert_instr(whilewr))]
23728pub unsafe fn svwhilewr_s32(op1: *const i32, op2: *const i32) -> svbool_t {
23729 svwhilewr_32ptr::<i32>(op1, op2)
23730}
23731#[doc = "While free of write-after-read conflicts"]
23732#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s64])"]
23733#[doc = "## Safety"]
23734#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23735#[inline(always)]
23736#[target_feature(enable = "sve,sve2")]
23737#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23738#[cfg_attr(test, assert_instr(whilewr))]
23739pub unsafe fn svwhilewr_s64(op1: *const i64, op2: *const i64) -> svbool_t {
23740 svwhilewr_64ptr::<i64>(op1, op2)
23741}
23742#[doc = "While free of write-after-read conflicts"]
23743#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u8])"]
23744#[doc = "## Safety"]
23745#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23746#[inline(always)]
23747#[target_feature(enable = "sve,sve2")]
23748#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23749#[cfg_attr(test, assert_instr(whilewr))]
23750pub unsafe fn svwhilewr_u8(op1: *const u8, op2: *const u8) -> svbool_t {
23751 svwhilewr_8ptr::<u8>(op1, op2)
23752}
23753#[doc = "While free of write-after-read conflicts"]
23754#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u16])"]
23755#[doc = "## Safety"]
23756#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23757#[inline(always)]
23758#[target_feature(enable = "sve,sve2")]
23759#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23760#[cfg_attr(test, assert_instr(whilewr))]
23761pub unsafe fn svwhilewr_u16(op1: *const u16, op2: *const u16) -> svbool_t {
23762 svwhilewr_16ptr::<u16>(op1, op2)
23763}
23764#[doc = "While free of write-after-read conflicts"]
23765#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u32])"]
23766#[doc = "## Safety"]
23767#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23768#[inline(always)]
23769#[target_feature(enable = "sve,sve2")]
23770#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23771#[cfg_attr(test, assert_instr(whilewr))]
23772pub unsafe fn svwhilewr_u32(op1: *const u32, op2: *const u32) -> svbool_t {
23773 svwhilewr_32ptr::<u32>(op1, op2)
23774}
23775#[doc = "While free of write-after-read conflicts"]
23776#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u64])"]
23777#[doc = "## Safety"]
23778#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
23779#[inline(always)]
23780#[target_feature(enable = "sve,sve2")]
23781#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23782#[cfg_attr(test, assert_instr(whilewr))]
23783pub unsafe fn svwhilewr_u64(op1: *const u64, op2: *const u64) -> svbool_t {
23784 svwhilewr_64ptr::<u64>(op1, op2)
23785}
23786#[doc = "Bitwise exclusive OR and rotate right"]
23787#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s8])"]
23788#[inline(always)]
23789#[target_feature(enable = "sve,sve2")]
23790#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23791#[cfg_attr(test, assert_instr(xar, IMM3 = 1))]
23792pub fn svxar_n_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
23793 static_assert_range!(IMM3, 1..=8);
23794 unsafe extern "unadjusted" {
23795 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv16i8")]
23796 fn _svxar_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
23797 }
23798 unsafe { _svxar_n_s8(op1, op2, IMM3) }
23799}
23800#[doc = "Bitwise exclusive OR and rotate right"]
23801#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s16])"]
23802#[inline(always)]
23803#[target_feature(enable = "sve,sve2")]
23804#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23805#[cfg_attr(test, assert_instr(xar, IMM3 = 1))]
23806pub fn svxar_n_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
23807 static_assert_range!(IMM3, 1..=16);
23808 unsafe extern "unadjusted" {
23809 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv8i16")]
23810 fn _svxar_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
23811 }
23812 unsafe { _svxar_n_s16(op1, op2, IMM3) }
23813}
23814#[doc = "Bitwise exclusive OR and rotate right"]
23815#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s32])"]
23816#[inline(always)]
23817#[target_feature(enable = "sve,sve2")]
23818#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23819#[cfg_attr(test, assert_instr(xar, IMM3 = 1))]
23820pub fn svxar_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
23821 static_assert_range!(IMM3, 1..=32);
23822 unsafe extern "unadjusted" {
23823 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv4i32")]
23824 fn _svxar_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
23825 }
23826 unsafe { _svxar_n_s32(op1, op2, IMM3) }
23827}
23828#[doc = "Bitwise exclusive OR and rotate right"]
23829#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s64])"]
23830#[inline(always)]
23831#[target_feature(enable = "sve,sve2")]
23832#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23833#[cfg_attr(test, assert_instr(xar, IMM3 = 1))]
23834pub fn svxar_n_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
23835 static_assert_range!(IMM3, 1..=64);
23836 unsafe extern "unadjusted" {
23837 #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv2i64")]
23838 fn _svxar_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
23839 }
23840 unsafe { _svxar_n_s64(op1, op2, IMM3) }
23841}
23842#[doc = "Bitwise exclusive OR and rotate right"]
23843#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u8])"]
23844#[inline(always)]
23845#[target_feature(enable = "sve,sve2")]
23846#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23847#[cfg_attr(test, assert_instr(xar, IMM3 = 1))]
23848pub fn svxar_n_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
23849 static_assert_range!(IMM3, 1..=8);
23850 unsafe { svxar_n_s8::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
23851}
23852#[doc = "Bitwise exclusive OR and rotate right"]
23853#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u16])"]
23854#[inline(always)]
23855#[target_feature(enable = "sve,sve2")]
23856#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23857#[cfg_attr(test, assert_instr(xar, IMM3 = 1))]
23858pub fn svxar_n_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
23859 static_assert_range!(IMM3, 1..=16);
23860 unsafe { svxar_n_s16::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
23861}
23862#[doc = "Bitwise exclusive OR and rotate right"]
23863#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u32])"]
23864#[inline(always)]
23865#[target_feature(enable = "sve,sve2")]
23866#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23867#[cfg_attr(test, assert_instr(xar, IMM3 = 1))]
23868pub fn svxar_n_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
23869 static_assert_range!(IMM3, 1..=32);
23870 unsafe { svxar_n_s32::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
23871}
23872#[doc = "Bitwise exclusive OR and rotate right"]
23873#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u64])"]
23874#[inline(always)]
23875#[target_feature(enable = "sve,sve2")]
23876#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23877#[cfg_attr(test, assert_instr(xar, IMM3 = 1))]
23878pub fn svxar_n_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
23879 static_assert_range!(IMM3, 1..=64);
23880 unsafe { svxar_n_s64::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
23881}